repo_name (string, 5–100 chars) | path (string, 4–299 chars) | copies (990 classes) | size (string, 4–7 chars) | content (string, 666–1.03M chars) | license (15 classes) | hash (int64) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
embeddedarm/android_external_chromium_org | tools/run-bisect-perf-regression.py | 26 | 14140 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run Performance Test Bisect Tool
This script is used by a trybot to run the src/tools/bisect-perf-regression.py
script with the parameters specified in run-bisect-perf-regression.cfg. It will
check out a copy of the depot in a subdirectory 'bisect' of the working
directory provided, and run the bisect-perf-regression.py script there.
"""
import imp
import optparse
import os
import subprocess
import sys
import traceback
import bisect_utils
bisect = imp.load_source('bisect-perf-regression',
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
'bisect-perf-regression.py'))
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
class Goma(object):
def __init__(self, path_to_goma):
self._abs_path_to_goma = None
self._abs_path_to_goma_file = None
if path_to_goma:
self._abs_path_to_goma = os.path.abspath(path_to_goma)
self._abs_path_to_goma_file = self._GetExecutablePath(
self._abs_path_to_goma)
def __enter__(self):
if self._HasGOMAPath():
self._SetupAndStart()
return self
def __exit__(self, *_):
if self._HasGOMAPath():
self._Stop()
def _HasGOMAPath(self):
return bool(self._abs_path_to_goma)
def _GetExecutablePath(self, path_to_goma):
if os.name == 'nt':
return os.path.join(path_to_goma, 'goma_ctl.bat')
else:
return os.path.join(path_to_goma, 'goma_ctl.sh')
def _SetupEnvVars(self):
if os.name == 'nt':
os.environ['CC'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
' cl.exe')
os.environ['CXX'] = (os.path.join(self._abs_path_to_goma, 'gomacc.exe') +
' cl.exe')
else:
os.environ['PATH'] = os.pathsep.join([self._abs_path_to_goma,
os.environ['PATH']])
def _SetupAndStart(self):
"""Sets up GOMA and launches it.
Args:
path_to_goma: Path to goma directory.
Returns:
True if successful."""
self._SetupEnvVars()
# Sometimes goma is lingering around if something went bad on a previous
# run. Stop it before starting a new process. Can ignore the return code
# since it will return an error if it wasn't running.
self._Stop()
if subprocess.call([self._abs_path_to_goma_file, 'start']):
raise RuntimeError('GOMA failed to start.')
def _Stop(self):
subprocess.call([self._abs_path_to_goma_file, 'stop'])
def _LoadConfigFile(path_to_file):
"""Attempts to load the specified config file as a module
and grab the global config dict.
Args:
path_to_file: Path to the file.
Returns:
The config dict which should be formatted as follows:
{'command': string, 'good_revision': string, 'bad_revision': string,
'metric': string, etc...}.
Returns an empty dict on failure (see the illustrative example below this
function).
"""
try:
local_vars = {}
execfile(path_to_file, local_vars)
return local_vars['config']
except:
print
traceback.print_exc()
print
return {}
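# An illustrative config, to make the expected shape of
# run-bisect-perf-regression.cfg concrete. All values below are hypothetical
# examples, not taken from this repository; the real file is exec'd and must
# define a dict named `config`:
#
#   config = {
#     'command': './tools/perf/run_benchmark -v --browser=release sunspider',
#     'good_revision': '215722',
#     'bad_revision': '215828',
#     'metric': 'Total/Total',
#     'repeat_count': '20',
#     'max_time_minutes': '20',
#     'truncate_percent': '25',
#   }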
def _OutputFailedResults(text_to_print):
bisect_utils.OutputAnnotationStepStart('Results - Failed')
print
print text_to_print
print
bisect_utils.OutputAnnotationStepClosed()
def _CreateBisectOptionsFromConfig(config):
opts_dict = {}
opts_dict['command'] = config['command']
opts_dict['metric'] = config['metric']
if config['repeat_count']:
opts_dict['repeat_test_count'] = int(config['repeat_count'])
if config['truncate_percent']:
opts_dict['truncate_percent'] = int(config['truncate_percent'])
if config['max_time_minutes']:
opts_dict['max_time_minutes'] = int(config['max_time_minutes'])
if config.has_key('use_goma'):
opts_dict['use_goma'] = config['use_goma']
opts_dict['build_preference'] = 'ninja'
opts_dict['output_buildbot_annotations'] = True
if '--browser=cros' in config['command']:
opts_dict['target_platform'] = 'cros'
if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
else:
raise RuntimeError('Cros build selected, but BISECT_CROS_IP or '
'BISECT_CROS_BOARD undefined.')
elif 'android' in config['command']:
if 'android-chrome' in config['command']:
opts_dict['target_platform'] = 'android-chrome'
else:
opts_dict['target_platform'] = 'android'
return bisect.BisectOptions.FromDict(opts_dict)
def _RunPerformanceTest(config, path_to_file):
# Bisect script expects to be run from src
os.chdir(os.path.join(path_to_file, '..'))
bisect_utils.OutputAnnotationStepStart('Building With Patch')
opts = _CreateBisectOptionsFromConfig(config)
b = bisect.BisectPerformanceMetrics(None, opts)
if bisect_utils.RunGClient(['runhooks']):
raise RuntimeError('Failed to run gclient runhooks')
if not b.BuildCurrentRevision('chromium'):
raise RuntimeError('Patched version failed to build.')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Running With Patch')
results_with_patch = b.RunPerformanceTestAndParseResults(
opts.command, opts.metric, reset_on_first_run=True, results_label='Patch')
if results_with_patch[1]:
raise RuntimeError('Patched version failed to run performance test.')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Reverting Patch')
if bisect_utils.RunGClient(['revert']):
raise RuntimeError('Failed to run gclient revert')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Building Without Patch')
if bisect_utils.RunGClient(['runhooks']):
raise RuntimeError('Failed to run gclient runhooks')
if not b.BuildCurrentRevision('chromium'):
raise RuntimeError('Unpatched version failed to build.')
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Running Without Patch')
results_without_patch = b.RunPerformanceTestAndParseResults(
opts.command, opts.metric, upload_on_last_run=True, results_label='ToT')
if results_without_patch[1]:
raise RuntimeError('Unpatched version failed to run performance test.')
# Find the link to the cloud stored results file.
output = results_without_patch[2]
cloud_file_link = [t for t in output.splitlines()
if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
if cloud_file_link:
# What we're getting here is basically "View online at http://..." so parse
# out just the url portion.
cloud_file_link = cloud_file_link[0]
cloud_file_link = [t for t in cloud_file_link.split(' ')
if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
assert cloud_file_link, "Couldn't parse url from output."
cloud_file_link = cloud_file_link[0]
else:
cloud_file_link = ''
# Calculate the % difference in the means of the 2 runs.
percent_diff_in_means = (results_with_patch[0]['mean'] /
max(0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
std_err = bisect.CalculatePooledStandardError(
[results_with_patch[0]['values'], results_without_patch[0]['values']])
bisect_utils.OutputAnnotationStepClosed()
bisect_utils.OutputAnnotationStepStart('Results - %.02f +- %0.02f delta' %
(percent_diff_in_means, std_err))
print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
'Std. Error'.center(20, ' '))
print ' %s %s %s' % ('Patch'.center(10, ' '),
('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
print ' %s %s %s' % ('No Patch'.center(10, ' '),
('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
if cloud_file_link:
bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
bisect_utils.OutputAnnotationStepClosed()
def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
"""Attempts to build and run the current revision with and without the
current patch, with the parameters passed in.
Args:
config: The config read from run-perf-test.cfg.
path_to_file: Path to the bisect-perf-regression.py script.
path_to_goma: Path to goma directory.
Returns:
0 on success, otherwise 1.
"""
try:
with Goma(path_to_goma) as goma:
config['use_goma'] = bool(path_to_goma)
_RunPerformanceTest(config, path_to_file)
return 0
except RuntimeError, e:
bisect_utils.OutputAnnotationStepClosed()
_OutputFailedResults('Error: %s' % e.message)
return 1
def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma,
path_to_extra_src, dry_run):
"""Attempts to execute src/tools/bisect-perf-regression.py with the parameters
passed in.
Args:
config: A dict containing the parameters to pass to the script.
working_directory: A working directory to provide to the
bisect-perf-regression.py script, where it will store its own copy of
the depot.
path_to_file: Path to the bisect-perf-regression.py script.
path_to_goma: Path to goma directory.
path_to_extra_src: Path to extra source file.
dry_run: Do a dry run, skipping sync, build, and performance testing steps.
Returns:
0 on success, otherwise 1.
"""
bisect_utils.OutputAnnotationStepStart('Config')
print
for k, v in config.iteritems():
print ' %s : %s' % (k, v)
print
bisect_utils.OutputAnnotationStepClosed()
cmd = ['python', os.path.join(path_to_file, 'bisect-perf-regression.py'),
'-c', config['command'],
'-g', config['good_revision'],
'-b', config['bad_revision'],
'-m', config['metric'],
'--working_directory', working_directory,
'--output_buildbot_annotations']
if config['repeat_count']:
cmd.extend(['-r', config['repeat_count']])
if config['truncate_percent']:
cmd.extend(['-t', config['truncate_percent']])
if config['max_time_minutes']:
cmd.extend(['--max_time_minutes', config['max_time_minutes']])
cmd.extend(['--build_preference', 'ninja'])
if '--browser=cros' in config['command']:
cmd.extend(['--target_platform', 'cros'])
if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
cmd.extend(['--cros_board', os.environ[CROS_BOARD_ENV]])
cmd.extend(['--cros_remote_ip', os.environ[CROS_IP_ENV]])
else:
print 'Error: Cros build selected, but BISECT_CROS_IP or '\
'BISECT_CROS_BOARD undefined.'
print
return 1
if 'android' in config['command']:
if 'android-chrome' in config['command']:
cmd.extend(['--target_platform', 'android-chrome'])
else:
cmd.extend(['--target_platform', 'android'])
if path_to_goma:
cmd.append('--use_goma')
if path_to_extra_src:
cmd.extend(['--extra_src', path_to_extra_src])
if dry_run:
cmd.extend(['--debug_ignore_build', '--debug_ignore_sync',
'--debug_ignore_perf_test'])
cmd = [str(c) for c in cmd]
with Goma(path_to_goma) as goma:
return_code = subprocess.call(cmd)
if return_code:
print 'Error: bisect-perf-regression.py returned with error %d' %\
return_code
print
return return_code
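# For reference, the assembled command looks roughly like this (revision
# numbers, the metric and the paths are hypothetical examples):
#
#   python bisect-perf-regression.py -c "./tools/perf/run_benchmark sunspider" \
#       -g 215722 -b 215828 -m Total/Total \
#       --working_directory /b/bisect --output_buildbot_annotations \
#       --build_preference ninja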
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a trybot to run the bisection script using the parameters'
' provided in the run-bisect-perf-regression.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
parser.add_option('-p', '--path_to_goma',
type='str',
help='Path to goma directory. If this is supplied, goma '
'builds will be enabled.')
parser.add_option('--extra_src',
type='str',
help='Path to extra source file. If this is supplied, '
'bisect script will use this to override default behavior.')
parser.add_option('--dry_run',
action="store_true",
help='The script will perform the full bisect, but '
'without syncing, building, or running the performance '
'tests.')
(opts, args) = parser.parse_args()
path_to_current_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_bisect_cfg = os.path.join(path_to_current_directory,
'run-bisect-perf-regression.cfg')
config = _LoadConfigFile(path_to_bisect_cfg)
# Check if the config is empty
config_has_values = [v for v in config.values() if v]
if config and config_has_values:
if not opts.working_directory:
print 'Error: missing required parameter: --working_directory'
print
parser.print_help()
return 1
return _RunBisectionScript(config, opts.working_directory,
path_to_current_directory, opts.path_to_goma, opts.extra_src,
opts.dry_run)
else:
perf_cfg_files = ['run-perf-test.cfg', os.path.join('..', 'third_party',
'WebKit', 'Tools', 'run-perf-test.cfg')]
for current_perf_cfg_file in perf_cfg_files:
path_to_perf_cfg = os.path.join(
os.path.abspath(os.path.dirname(sys.argv[0])), current_perf_cfg_file)
config = _LoadConfigFile(path_to_perf_cfg)
config_has_values = [v for v in config.values() if v]
if config and config_has_values:
return _SetupAndRunPerformanceTest(config, path_to_current_directory,
opts.path_to_goma)
print 'Error: Could not load config file. Double check your changes to '\
'run-bisect-perf-regression.cfg/run-perf-test.cfg for syntax errors.'
print
return 1
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -7,143,241,393,861,652,000 | 32.349057 | 80 | 0.657638 | false |
anshulkgupta/viznow | Mayank/blpapi_python3.5.5/examples/SimpleAsyncSubscription.py | 1 | 10206 | # SimpleAsyncSubscription.py
import blpapi
import time
import traceback
import thread
import weakref
from optparse import OptionParser
from blpapi import Event as EventType
SESSION_STARTED = blpapi.Name("SessionStarted")
SESSION_STARTUP_FAILURE = blpapi.Name("SessionStartupFailure")
TOKEN_SUCCESS = blpapi.Name("TokenGenerationSuccess")
TOKEN_FAILURE = blpapi.Name("TokenGenerationFailure")
AUTHORIZATION_SUCCESS = blpapi.Name("AuthorizationSuccess")
AUTHORIZATION_FAILURE = blpapi.Name("AuthorizationFailure")
TOKEN = blpapi.Name("token")
AUTH_SERVICE = "//blp/apiauth"
EVENT_TYPE_NAMES = {
EventType.ADMIN: "ADMIN",
EventType.SESSION_STATUS: "SESSION_STATUS",
EventType.SUBSCRIPTION_STATUS: "SUBSCRIPTION_STATUS",
EventType.REQUEST_STATUS: "REQUEST_STATUS",
EventType.RESPONSE: "RESPONSE",
EventType.PARTIAL_RESPONSE: "PARTIAL_RESPONSE",
EventType.SUBSCRIPTION_DATA: "SUBSCRIPTION_DATA",
EventType.SERVICE_STATUS: "SERVICE_STATUS",
EventType.TIMEOUT: "TIMEOUT",
EventType.AUTHORIZATION_STATUS: "AUTHORIZATION_STATUS",
EventType.RESOLUTION_STATUS: "RESOLUTION_STATUS",
EventType.TOPIC_STATUS: "TOPIC_STATUS",
EventType.TOKEN_STATUS: "TOKEN_STATUS",
EventType.REQUEST: "REQUEST"
}
class Error(Exception):
pass
def getAuthentificationOptions(type, name):
if type == "NONE":
return None
elif type == "USER_APP":
return "AuthenticationMode=USER_AND_APPLICATION;"\
"AuthenticationType=OS_LOGON;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + name
elif type == "APPLICATION":
return "AuthenticationMode=APPLICATION_ONLY;"\
"ApplicationAuthenticationType=APPNAME_AND_KEY;"\
"ApplicationName=" + name
elif type == "DIRSVC":
return "AuthenticationType=DIRECTORY_SERVICE;"\
"DirSvcPropertyName=" + name
else:
return "AuthenticationType=OS_LOGON"
def topicName(security):
if security.startswith("//"):
return security
else:
return "//blp/mktdata/" + security
def printMessage(msg, eventType):
print "#{0} msg received: [{1}] => {2}/{3}".format(
thread.get_ident(),
", ".join(map(str, msg.correlationIds())),
EVENT_TYPE_NAMES[eventType],
msg)
def parseCmdLine():
parser = OptionParser()
parser.add_option("-a",
"--host",
dest="host",
help="HOST address to connect to",
metavar="HOST",
default="10.8.8.1")
parser.add_option("-p",
"--port",
dest="port",
type="int",
help="PORT to connect to (%default)",
metavar="PORT",
default=8194)
parser.add_option("-s",
"--security",
dest="securities",
help="security to subscribe to "
"('IBM US Equity' by default)",
metavar="SECURITY",
action="append",
default=[])
parser.add_option("-f",
"--fields",
dest="fields",
help="comma "
"separated list of FIELDS to subscribe to "
"('LAST_PRICE,BID,ASK' by default)",
metavar="FIELDS",
default="LAST_PRICE,BID,ASK")
parser.add_option("",
"--auth-type",
type="choice",
choices=["LOGON", "NONE", "APPLICATION", "DIRSVC",
"USER_APP"],
dest="authType",
help="Authentification type: LOGON (default), NONE, "
"APPLICATION, DIRSVC or USER_APP",
default="LOGON")
parser.add_option("",
"--auth-name",
dest="authName",
help="The name of application or directory service",
default="")
(options, args) = parser.parse_args()
if not options.securities:
options.securities = ["IBM US Equity"]
options.auth = getAuthentificationOptions(options.authType,
options.authName)
return options
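# Illustrative invocation (host, securities and fields are hypothetical
# examples; the option names come from parseCmdLine above):
#
#   python SimpleAsyncSubscription.py -a localhost -p 8194 \
#       -s "IBM US Equity" -s "MSFT US Equity" -f LAST_PRICE,BID,ASK \
#       --auth-type NONE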
# Subscribe 'session' for the securities and fields specified in 'options'
def subscribe(session, options, identity=None):
sl = blpapi.SubscriptionList()
for s in options.securities:
topic = topicName(s)
cid = blpapi.CorrelationId(s)
print "Subscribing {0} => {1}".format(cid, topic)
sl.add(topic, options.fields, correlationId=cid)
session.subscribe(sl, identity)
# Event handler
def processEvent(event, session):
global identity
global options
try:
eventType = event.eventType()
for msg in event:
# Print all incoming messages, including the SubscriptionData ones
printMessage(msg, eventType)
if eventType == EventType.SESSION_STATUS:
if msg.messageType() == SESSION_STARTED:
# Session.startAsync completed successfully
# Start the authorization if needed
if options.auth:
# Generate token
session.generateToken()
else:
identity = None
# Subscribe for the specified securities/fields
subscribe(session, options)
elif msg.messageType() == SESSION_STARTUP_FAILURE:
# Session.startAsync failed, raise exception to exit
raise Error("Can't start session")
elif eventType == EventType.TOKEN_STATUS:
if msg.messageType() == TOKEN_SUCCESS:
# Token generated successfully
# Continue the authorization
# Get generated token
token = msg.getElementAsString(TOKEN)
# Open auth service (we do it synchronously, just in case)
if not session.openService(AUTH_SERVICE):
raise Error("Failed to open auth service")
# Obtain opened service
authService = session.getService(AUTH_SERVICE)
# Create and fill the authorization request
authRequest = authService.createAuthorizationRequest()
authRequest.set(TOKEN, token)
# Create Identity
identity = session.createIdentity()
# Send authorization request to "fill" the Identity
session.sendAuthorizationRequest(authRequest, identity)
else:
# Token generation failed, raise exception to exit
raise Error("Failed to generate token")
elif eventType == EventType.RESPONSE \
or eventType == EventType.PARTIAL_RESPONSE:
if msg.messageType() == AUTHORIZATION_SUCCESS:
# Authorization passed, identity "filled" and can be used
# Subscribe for the specified securities/fields with using
# of the identity
subscribe(session, options, identity)
elif msg.messageType() == AUTHORIZATION_FAILURE:
# Authorization failed, raise exception to exit
raise Error("Failed to pass authorization")
except Error as ex:
print "Error in event handler:", ex
# Interrupt a "sleep loop" in main thread
thread.interrupt_main()
def main():
global options
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(options.host)
sessionOptions.setServerPort(options.port)
if options.auth:
sessionOptions.setAuthenticationOptions(options.auth)
# Create an EventDispatcher with 2 processing threads
dispatcher = blpapi.EventDispatcher(2)
# Create a Session
session = blpapi.Session(sessionOptions, processEvent, dispatcher)
# Start dispatcher to "pump" the received events
dispatcher.start()
# Start the session asynchronously
if not session.startAsync():
raise Exception("Can't initiate session start.")
# Sleep until the application is interrupted by the user (Ctrl+C pressed)
# or by an exception in the event handler
try:
# Note that: 'thread.interrupt_main()' could be used to
# correctly stop the application from 'processEvent'
while True:
time.sleep(1)
finally:
session.stop()
dispatcher.stop()
if __name__ == "__main__":
print "SimpleAsyncSubscription"
try:
main()
except KeyboardInterrupt:
print "Ctrl+C pressed. Stopping..."
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| mit | 4,046,798,626,188,617,700 | 36.8 | 78 | 0.594748 | false |
SUSE/azure-sdk-for-python | azure-mgmt-logic/azure/mgmt/logic/operations/workflows_operations.py | 4 | 37104 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class WorkflowsOperations(object):
"""WorkflowsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2016-06-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-06-01"
self.config = config
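# A minimal usage sketch (resource names are hypothetical; it assumes this
# operations class is exposed as `client.workflows` on the package's
# LogicManagementClient, which is how the generated SDK normally wires it up):
#
#   client = LogicManagementClient(credentials, subscription_id)
#   wf = client.workflows.get('my-resource-group', 'my-workflow')
#   for w in client.workflows.list_by_resource_group('my-resource-group', top=10):
#       print(w.name)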
def list_by_subscription(
self, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of workflows by subscription.
:param top: The number of items to be included in the result.
:type top: int
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkflowPaged <azure.mgmt.logic.models.WorkflowPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Logic/workflows'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkflowPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkflowPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of workflows by resource group.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param top: The number of items to be included in the result.
:type top: int
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`WorkflowPaged <azure.mgmt.logic.models.WorkflowPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkflowPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkflowPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Gets a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workflow', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, workflow_name, workflow, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param workflow: The workflow.
:type workflow: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(workflow, 'Workflow')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workflow', response)
if response.status_code == 201:
deserialized = self._deserialize('Workflow', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
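# Sketch of a create-or-update call. The resource names are hypothetical, and
# the Workflow model fields shown (location, definition) are assumptions about
# azure.mgmt.logic.models.Workflow rather than something defined in this file:
#
#   workflow = models.Workflow(location='westus', definition={...})
#   client.workflows.create_or_update('my-resource-group', 'my-workflow', workflow)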
def update(
self, resource_group_name, workflow_name, workflow, custom_headers=None, raw=False, **operation_config):
"""Updates a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param workflow: The workflow.
:type workflow: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(workflow, 'Workflow')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workflow', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def disable(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Disables a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/disable'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enable(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Enables a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/enable'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def generate_upgraded_definition(
self, resource_group_name, workflow_name, target_schema_version=None, custom_headers=None, raw=False, **operation_config):
"""Generates the upgraded definition for a workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param target_schema_version: The target schema version.
:type target_schema_version: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.GenerateUpgradedDefinitionParameters(target_schema_version=target_schema_version)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/generateUpgradedDefinition'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'GenerateUpgradedDefinitionParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_swagger(
self, resource_group_name, workflow_name, custom_headers=None, raw=False, **operation_config):
"""Gets an OpenAPI definition for the workflow.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/listSwagger'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('object', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def regenerate_access_key(
self, resource_group_name, workflow_name, key_type=None, custom_headers=None, raw=False, **operation_config):
"""Regenerates the callback URL access key for request triggers.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param key_type: The key type. Possible values include:
'NotSpecified', 'Primary', 'Secondary'
:type key_type: str or :class:`KeyType
<azure.mgmt.logic.models.KeyType>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
key_type1 = models.RegenerateActionParameter(key_type=key_type)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/regenerateAccessKey'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(key_type1, 'RegenerateActionParameter')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def validate(
self, resource_group_name, location, workflow_name, workflow, custom_headers=None, raw=False, **operation_config):
"""Validates the workflow definition.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param location: The workflow location.
:type location: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param workflow: The workflow definition.
:type workflow: :class:`Workflow <azure.mgmt.logic.models.Workflow>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/locations/{location}/workflows/{workflowName}/validate'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(workflow, 'Workflow')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
| mit | 807,917,002,134,598,900 | 45.671698 | 163 | 0.640497 | false |
darkfeline/animanager | animanager/files/anime.py | 1 | 3144 | # Copyright (C) 2015-2016 Allen Li
#
# This file is part of Animanager.
#
# Animanager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Animanager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Animanager. If not, see <http://www.gnu.org/licenses/>.
"""Anime file tools."""
import json
import os
import re
from collections import defaultdict
from itertools import chain
from typing import Iterable, Set
class AnimeFiles:
r"""Used for matching and grouping files for an anime.
>>> x = AnimeFiles(r'madoka.*?(?P<ep>\d+)')
>>> x.add('madoka - 01.mkv')
>>> x.add('madoka - 01v2.mkv')
>>> x.add('madoka - 02.mkv')
>>> x.add('utawarerumono - 01.mkv')
>>> list(x)
[1, 2]
>>> x.available_string(0)
'1,2'
>>> x[1] == {'madoka - 01.mkv', 'madoka - 01v2.mkv'}
True
"""
EPISODES_TO_SHOW = 6
def __init__(self, regexp: str, filenames: Iterable = ()) -> None:
self.regexp = re.compile(regexp, re.I)
# Maps episode int to list of filename strings.
self.by_episode = defaultdict(set)
self.add_iter(filenames)
def __getitem__(self, episode: int) -> Set[str]:
return self.by_episode[episode]
def __contains__(self, episode: int) -> bool:
return episode in self.by_episode
def __iter__(self):
return iter(sorted(self.by_episode))
def __repr__(self):
return 'AnimeFiles({}, {})'.format(
self.regexp.pattern,
self.filenames,
)
def add(self, filename):
"""Try to add a file."""
basename = os.path.basename(filename)
match = self.regexp.search(basename)
if match:
self.by_episode[int(match.group('ep'))].add(filename)
def add_iter(self, filenames):
"""Try to add files."""
for filename in filenames:
self.add(filename)
def available_string(self, episode):
"""Return a string of available episodes."""
available = [ep for ep in self if ep > episode]
string = ','.join(str(ep) for ep in available[:self.EPISODES_TO_SHOW])
if len(available) > self.EPISODES_TO_SHOW:
string += '...'
return string
@property
def filenames(self):
"""Added filenames."""
return list(chain(*self.by_episode.values()))
def to_json(self):
"""Export AnimeFiles to JSON string."""
return json.dumps({
'regexp': self.regexp.pattern,
'files': self.filenames,
})
@classmethod
def from_json(cls, string):
"""Create AnimeFiles from JSON string."""
obj = json.loads(string)
return cls(obj['regexp'], obj['files'])
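# Illustrative round trip through the JSON helpers (the filename is a
# hypothetical example):
#
#   af = AnimeFiles(r'madoka.*?(?P<ep>\d+)', ['madoka - 01.mkv'])
#   restored = AnimeFiles.from_json(af.to_json())
#   assert restored.filenames == af.filenames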
| gpl-3.0 | 8,488,993,397,748,272,000 | 28.942857 | 78 | 0.611005 | false |
Robbie1977/TGscripts | CwarpToTemplate.py | 1 | 11633 | import os, shutil, subprocess, datetime, socket, fnmatch, tarfile, time
from mysettings import cmtkdir, fiji, Tfile, ver, Qual, LsmPP
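# `mysettings` is a local configuration module that is not part of this script.
# A minimal sketch of what it is expected to provide; every path below is a
# hypothetical placeholder, inferred only from how the names are used later:
#
#   cmtkdir = '/opt/cmtk/bin/'                 # directory holding the CMTK tools
#   fiji = '/opt/Fiji.app/ImageJ-linux64'      # Fiji/ImageJ launcher
#   Tfile = '/data/templates/template.nrrd'    # reference template image
#   ver = 'JH-warp-v1'                         # version tag written to the log
#   Qual = '/opt/scripts/quality_measure.py'   # quality-measure script
#   LsmPP = '/opt/scripts/lsm_preprocess.ijm'  # Fiji preprocessing macro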
outdir = os.getcwd() + '/'
#fo = open("PLpre.txt",'r')
#filelist = fo.readlines()
#fo.close()
hostn = socket.gethostname()
runid = os.getpid()
procid = '[' + hostn + ';' + str(runid) + ']'
#for fname in filelist:
while not hostn in open('stop.txt', 'rb').read():
for fname in os.listdir('intray'):
if fnmatch.fnmatch(fname, '*.lsm') or fnmatch.fnmatch(fname, '*.raw') or fnmatch.fnmatch(fname, '*.tif'):
fname = fname.replace('\n','').replace('/disk/data/VFB/IMAGE_DATA/Janelia2012/TG/logs/',outdir)
Pcode = ''
try:
if os.path.exists('intray/' + fname):
os.rename('intray/' + fname,fname)
Pcode += 'R'
basename = fname.replace('.lsm','')
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Started JH warp (' + ver + '), ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
FloatFile = fname
GxDF = outdir + basename + '-global.raw'
Goutput = basename + '-PP_C2.nrrd'
Ixform = outdir + basename + '-PP-initial.xform'
Axform = outdir + basename + '-PP-affine.xform'
Foutput = Goutput.replace('-PP.raw', '-PP_C2.nrrd')
SigFile = Goutput.replace('-PP.raw', '-PP_C1.nrrd')
MetFile = Goutput.replace('-PP.raw', '-PP_Meta.log')
W5xform = outdir + basename + '-PP-warp.xform'
W5output = outdir + basename + '-PP-BGwarp.nrrd'
Wsigout = outdir + basename + '-PP-SGwarp.nrrd'
Routput = basename + '-PP-BGwarp.nrrd'
Qualout = basename + '-PP-warp.raw_qual.csv'
Loutput = basename + '-PP-warp-local'
print 'Warping file %s...' % fname
#check for complete skip
if os.path.exists(W5xform):
print 'Warp5 output already exists - skipping.'
Pcode += 'C'
else:
#Generate the Initial preprocessing
Pcode += 'P'
if os.path.exists(Foutput):
print 'Preprocessing already done - skipping.'
Pcode += 'S'
else:
return_code = subprocess.call('nice xvfb-run ' + fiji + ' -macro %s ./%s -batch' % (LsmPP, FloatFile), shell=True) # -F %s , GxDF
print 'Preprocessing returned: %d' % return_code
Pcode += 'pp'
#Convert raw to nrrd
#return_code = subprocess.call('nice ' + fiji + ' -macro %s %s -batch' % (Rawconv, Goutput), shell=True)
#print 'Fiji/ImageJ conversion returned: %d' % return_code
#Pcode += 'F'
#Generate the Initial Transform
if os.path.exists(Ixform):
print 'Initial xform already exists - skipping.'
Pcode += 'S'
FloatFile = Foutput
else:
FloatFile = Foutput
return_code = subprocess.call('nice ' + cmtkdir + 'make_initial_affine --principal-axes %s %s %s' % (Tfile, FloatFile, Ixform + '_part'), shell=True)
os.rename(Ixform + '_part', Ixform)
print 'Initial registration returned: %d' % return_code
Pcode += 'I'
if os.path.exists(Axform):
print 'Affine xform already exists - skipping.'
Pcode += 'S'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'registration --initial %s --dofs 6,9 --auto-multi-levels 4 -o %s %s %s' % (Ixform, Axform + '_part', Tfile, FloatFile), shell=True)
os.rename(Axform + '_part', Axform)
print 'registration returned: %d' % return_code
Pcode += 'A'
#Generate the Warped Transform
if os.path.exists(W5xform):
print 'Warp5 xform already exists - skipping.'
Pcode += 'S'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'warp -o %s --grid-spacing 80 --exploration 30 --coarsest 4 --accuracy 0.2 --refine 4 --energy-weight 1e-1 --initial %s %s %s' % (W5xform + '_part', Axform, Tfile, FloatFile), shell=True) #coarsest adjusted from 8 to 4 as per greg sug.
os.rename(W5xform + '_part', W5xform)
print 'warp (5) returned: %d' % return_code
Pcode += 'W'
#Output a file to show the Warped Transform
if os.path.exists(W5output):
print 'Warp5 output already exists - skipping.'
Pcode += 'S'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'reformatx -o %s --floating %s %s %s' % (W5output, FloatFile, Tfile, W5xform), shell=True)
print 'reformatx returned: %d' % return_code
Pcode += 'R'
                        print 'Completed background warping for %s.' % basename
if os.path.exists(Qualout):
print 'Quality measure already exists - skipping.'
Pcode += 'S'
else:
return_code = subprocess.call('nice python %s %s %s %s &' % (Qual, W5output, Tfile, Qualout), shell=True)
print 'Qual returned: %d' % return_code
Pcode += 'Q'
print 'Completed generating Qual measure for %s.' % basename
if os.path.exists(Wsigout):
print 'Signal warp output already exists - skipping.'
Pcode += 'S'
else:
return_code = subprocess.call('nice ' + cmtkdir + 'reformatx -o %s --floating %s %s %s' % (Wsigout, SigFile, Tfile, W5xform), shell=True)
print 'reformatx returned: %d' % return_code
Pcode += 'D'
                        print 'Completed signal warping for %s.' % basename
# if os.path.exists(Routput):
# print 'RAW warp output already exists - skipping.'
# else:
# return_code = subprocess.call('nice ' + fiji + ' -macro %s %s -batch' % (Nrrdconv, Routput), shell=True)
# print 'Fiji returned: %d' % return_code
# print 'Completed generating RAW warp for %s.' % basename
if os.path.exists(W5output):
Pcode += 'F'
#os.rename(fname,fname.replace('.lsm','-Done.lsm').replace('.raw','-Dome.raw').replace('.tif','-Done.tif'))
#shutil.move(fname.replace('_blue',''),fname.replace('logs/','logs/nrrds/'))
if os.path.exists(Foutput):
os.remove(Foutput)
if os.path.exists(SigFile):
os.remove(SigFile)
if os.path.exists(Ixform):
os.remove(Ixform)
if os.path.exists(Axform):
shutil.rmtree(Axform, ignore_errors=True)
#os.remove(Aoutput)
if os.path.exists(W5xform):
#compress final xform directory and remove original:
tar = tarfile.open(W5xform + '.tar.gz', 'w:gz')
tar.add(W5xform)
tar.close()
shutil.rmtree(W5xform, ignore_errors=True)
if os.path.exists(fname):
os.remove(fname)
Pcode += 'C'
with open("PLdone.txt", "a") as myfile:
myfile.write(Wsigout + '\n')
if os.path.exists(MetFile):
os.rename(MetFile,'outtray/' + MetFile)
if os.path.exists(Wsigout):
os.rename(Wsigout,Wsigout.replace(outdir,outdir + 'outtray/'))
if os.path.exists(W5output):
os.rename(W5output,W5output.replace(outdir,outdir + 'outtray/'))
if os.path.exists(W5xform + '.tar.gz'):
os.rename(W5xform + '.tar.gz',W5xform.replace(outdir,outdir + 'outtray/') + '.tar.gz')
if os.path.exists(Qualout):
os.rename(Qualout,'outtray/' + Qualout)
Pcode += 'M'
#os.remove(W5output) #Needed for checking only
print 'Clean-up for %s done.' % basename
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Finished JH warp (' + Pcode + '), ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
else:
                    print 'Failed warping for %s.' % basename
os.rename(fname,'intray/' + fname)
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Failed JH warp (' + Pcode + '), ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
except OSError as e:
                print 'Skipping file'
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
myfile.write(basename + ', Error during JH warp (' + Pcode + '): ' + e.strerror + ' (' + str(e.filename) +'), ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
try:
os.rename(fname,'intray/' + fname)
except:
print 'Not started!'
print 'All Done...'
    time.sleep(88) # sleep for ~1.5 minutes between polls
print 'Stop requested!'
with open("PLwarp.log", "a") as myfile: # Log entry for process time and error checking
    myfile.write('Stopped by request!, ' + procid + ', ' + str(datetime.datetime.now()) + '\n')
| mit | -370,387,519,062,286,900 | 53.660287 | 313 | 0.435227 | false |
PeterSurda/PyBitmessage | dev/bloomfiltertest.py | 3 | 1918 | """
dev/bloomfiltertest.py
======================
"""
import sqlite3
from os import getenv, path
from time import time
from pybloom import BloomFilter as BloomFilter1 # pylint: disable=import-error
from pybloomfilter import BloomFilter as BloomFilter2 # pylint: disable=import-error
# Ubuntu: apt-get install python-pybloomfiltermmap
conn = sqlite3.connect(path.join(getenv("HOME"), '.config/PyBitmessage/messages.dat'))
conn.text_factory = str
cur = conn.cursor()
rawlen = 0
itemcount = 0
cur.execute('''SELECT COUNT(hash) FROM inventory''')
for row in cur.fetchall():
itemcount = row[0]
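# Round the filter capacity up to the next multiple of 1000 and target a 0.1% error rate.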
filtersize = 1000 * (int(itemcount / 1000) + 1)
errorrate = 1.0 / 1000.0
bf1 = BloomFilter1(capacity=filtersize, error_rate=errorrate)
bf2 = BloomFilter2(capacity=filtersize, error_rate=errorrate)
item = '''SELECT hash FROM inventory'''
cur.execute(item)
bf1time = 0
bf2time = 0
for row in cur.fetchall():
rawlen += len(row[0])
try:
times = [time()]
bf1.add(row[0])
times.append(time())
bf2.add(row[0])
times.append(time())
bf1time += times[1] - times[0]
bf2time += times[2] - times[1]
except IndexError:
pass
# f = open("/home/shurdeek/tmp/bloom.dat", "wb")
# sb1.tofile(f)
# f.close()
print "Item count: %i" % (itemcount)
print "Raw length: %i" % (rawlen)
print "Bloom filter 1 length: %i, reduction to: %.2f%%" % \
(bf1.bitarray.buffer_info()[1],
100.0 * bf1.bitarray.buffer_info()[1] / rawlen)
print "Bloom filter 1 capacity: %i and error rate: %.3f%%" % (bf1.capacity, 100.0 * bf1.error_rate)
print "Bloom filter 1 took %.2fs" % (bf1time)
print "Bloom filter 2 length: %i, reduction to: %.3f%%" % \
(bf2.num_bits / 8,
100.0 * bf2.num_bits / 8 / rawlen)
print "Bloom filter 2 capacity: %i and error rate: %.3f%%" % (bf2.capacity, 100.0 * bf2.error_rate)
print "Bloom filter 2 took %.2fs" % (bf2time)
| mit | 6,938,646,361,064,516,000 | 28.060606 | 99 | 0.647028 | false |
SahilTikale/haas | tests/deployment/vlan_networks.py | 4 | 10638 | """Test various properties re: vlan tagged networks, on real hardware.
For guidance on running these tests, see the section on deployment
tests in docs/testing.md
"""
import json
from collections import namedtuple
from hil import api, model, deferred, config, errors
from hil.test_common import config_testsuite, fail_on_log_warnings, \
fresh_database, with_request_context, site_layout, NetworkTest, \
network_create_simple, server_init
import pytest
@pytest.fixture
def configure():
"""Configure HIL."""
config_testsuite()
config.load_extensions()
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
fresh_database = pytest.fixture(fresh_database)
server_init = pytest.fixture(server_init)
with_request_context = pytest.yield_fixture(with_request_context)
site_layout = pytest.fixture(site_layout)
pytestmark = pytest.mark.usefixtures('configure',
'server_init',
'fresh_database',
'with_request_context',
'site_layout')
class TestNetworkVlan(NetworkTest):
"""NetworkTest using tagged vlan networks."""
def test_isolated_networks(self):
"""Do a bunch of network operations on the switch, and verify things
along the way.
The above is super vague; unfortunately the setup operations are very
slow, so it makes a huge difference to do everything in one pass. See
the comments in-line to understand exactly what is being tested.
"""
def get_legal_channels(network):
"""Get the legal channels for a network."""
response_body = api.show_network(network)
response_body = json.loads(response_body)
return response_body['channels']
def create_networks():
"""Create networks and connect things to them.
Test various things along the way.
"""
nodes = self.collect_nodes()
# Create four networks
network_create_simple('net-0', 'anvil-nextgen')
network_create_simple('net-1', 'anvil-nextgen')
network_create_simple('net-2', 'anvil-nextgen')
network_create_simple('net-3', 'anvil-nextgen')
ports = self.get_all_ports(nodes)
# get the switch name from any of the nics
switch = nodes[0].nics[0].port.owner
# Assert that n0 and n1 are not on any network
port_networks = self.get_port_networks(ports)
assert self.get_network(nodes[0].nics[0].port, port_networks) == \
set()
assert self.get_network(nodes[1].nics[0].port, port_networks) == \
set()
# Get the channel ids for the tagged versions of the networks:
net_tag = {}
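            # Index 1 of the legal channels list is the tagged (vlan/<id>) channel.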
net_tag[0] = get_legal_channels('net-0')[1]
net_tag[1] = get_legal_channels('net-1')[1]
net_tag[2] = get_legal_channels('net-2')[1]
# Connect node 0 to net-0 (native mode)
api.node_connect_network(nodes[0].label,
nodes[0].nics[0].label,
'net-0')
# before connecting node 1 to net-1 in tagged mode, we must check
# if the switch supports nativeless trunk mode; if not, then we
# add some native network and perform additional checks before
# proceeding.
if 'nativeless-trunk-mode' not in switch.get_capabilities():
# connecting the first network as tagged should raise an error
with pytest.raises(errors.BlockedError):
api.node_connect_network(nodes[1].label,
nodes[1].nics[0].label,
'net-2',
channel=net_tag[2])
api.node_connect_network(nodes[1].label,
nodes[1].nics[0].label,
'net-2')
deferred.apply_networking()
# Connect node 1 to net-1 (tagged mode)
api.node_connect_network(nodes[1].label,
nodes[1].nics[0].label,
'net-1',
channel=net_tag[1])
deferred.apply_networking()
# Assert that n0 and n1 are on isolated networks
port_networks = self.get_port_networks(ports)
assert self.get_network(nodes[0].nics[0].port, port_networks) == \
set([nodes[0].nics[0].port])
assert self.get_network(nodes[1].nics[0].port, port_networks) == \
set([nodes[1].nics[0].port])
# Add n2 and n3 to the same networks as n0 and n1 respectively, but
# with different channels (native vs. tagged)
if 'nativeless-trunk-mode' not in switch.get_capabilities():
api.node_connect_network(nodes[2].label,
nodes[2].nics[0].label,
'net-3')
deferred.apply_networking()
api.node_connect_network(nodes[2].label,
nodes[2].nics[0].label,
'net-0',
channel=net_tag[0])
api.node_connect_network(nodes[3].label,
nodes[3].nics[0].label,
'net-1')
deferred.apply_networking()
# Assert that n2 and n3 have been added to n0 and n1's networks
# respectively
port_networks = self.get_port_networks(ports)
assert self.get_network(nodes[0].nics[0].port, port_networks) == \
set([nodes[0].nics[0].port, nodes[2].nics[0].port])
assert self.get_network(nodes[1].nics[0].port, port_networks) == \
set([nodes[1].nics[0].port, nodes[3].nics[0].port])
# Verify that we can put nodes on more than one network, with
# different channels:
# at this point, node-2 is connected to net-0 (tagged)
# and depending on the switch, to net-3 (native). Let's connect it
# to net-1 (tagged) (which node-1 is connected to)
api.node_connect_network(nodes[2].label,
nodes[2].nics[0].label,
'net-1',
channel=net_tag[1])
deferred.apply_networking()
port_networks = self.get_port_networks(ports)
# assert that node-2 was added to node-1's network correctly.
assert self.get_network(nodes[1].nics[0].port, port_networks) == \
set([nodes[1].nics[0].port,
nodes[2].nics[0].port,
nodes[3].nics[0].port])
def delete_networks():
"""Tear down things set up by create_networks
again, we do various checks along the way.
"""
# Query the DB for nodes on this project
project = api.get_or_404(model.Project, 'anvil-nextgen')
nodes = project.nodes
ports = self.get_all_ports(nodes)
# Remove all nodes from their networks. We do this in two ways, to
# test the different mechanisms.
# For the first two nodes, we first build up a list of
# the arguments to the API calls, which has no direct references to
# database objects, and then make the API calls and invoke
# deferred.apply_networking after. This is important --
# The API calls and apply_networking normally run in their own
# transaction. We get away with not doing this in the tests because
# we serialize everything ourselves, so there's no risk of
# interference. If we were to hang on to references to database
            # objects across such calls however, things could get hairy.
all_attachments = []
net = namedtuple('net', 'node nic network channel')
for node in nodes[:2]:
attachments = model.NetworkAttachment.query \
.filter_by(nic=node.nics[0]).all()
for attachment in attachments:
all_attachments.append(
net(node=node.label,
nic=node.nics[0].label,
network=attachment.network.label,
channel=attachment.channel))
switch = nodes[0].nics[0].port.owner
# in some switches, the native network can only be disconnected
# after we remove all tagged networks first. The following checks
# for that and rearranges the networks (all_attachments) such that
# tagged networks are removed first.
if 'nativeless-trunk-mode' not in switch.get_capabilities():
# sort by channel; vlan/<integer> comes before vlan/native
# because the ASCII for numbers comes before ASCII for letters.
all_attachments = sorted(all_attachments,
key=lambda net: net.channel)
for attachment in all_attachments:
api.node_detach_network(attachment.node, attachment.nic,
attachment.network)
deferred.apply_networking()
# For the second two nodes, we just call port_revert on the nic's
# port.
for node in nodes[2:]:
port = node.nics[0].port
api.port_revert(port.owner.label, port.label)
deferred.apply_networking()
# Assert that none of the nodes are on any network
port_networks = self.get_port_networks(ports)
for node in nodes:
assert self.get_network(node.nics[0].port, port_networks) == \
set()
# Delete the networks
api.network_delete('net-0')
api.network_delete('net-1')
api.network_delete('net-2')
api.network_delete('net-3')
# Create a project
api.project_create('anvil-nextgen')
create_networks()
delete_networks()
| apache-2.0 | 165,807,373,512,891,000 | 43.325 | 79 | 0.535251 | false |
trevor/calendarserver | calendarserver/provision/root.py | 1 | 16292 | # -*- test-case-name: calendarserver.provision.test.test_root -*-
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
__all__ = [
"RootResource",
]
try:
from calendarserver.platform.darwin.sacl import checkSACL
except ImportError:
# OS X Server SACLs not supported on this system, make SACL check a no-op
checkSACL = lambda *ignored: True
from twext.python.log import Logger
from twisted.cred.error import LoginFailed, UnauthorizedLogin
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.reflect import namedClass
from twisted.web.error import Error as WebError
from twistedcaldav.cache import DisabledCache
from twistedcaldav.cache import MemcacheResponseCache, MemcacheChangeNotifier
from twistedcaldav.cache import _CachedResponseResource
from twistedcaldav.config import config
from twistedcaldav.directory.principal import DirectoryPrincipalResource
from twistedcaldav.extensions import DAVFile, CachingPropertyStore
from twistedcaldav.extensions import DirectoryPrincipalPropertySearchMixIn
from twistedcaldav.extensions import ReadOnlyResourceMixIn
from twistedcaldav.resource import CalDAVComplianceMixIn
from txdav.who.wiki import DirectoryService as WikiDirectoryService
from txdav.who.wiki import uidForAuthToken
from txweb2 import responsecode
from txweb2.auth.wrapper import UnauthorizedResponse
from txweb2.dav.xattrprops import xattrPropertyStore
from txweb2.http import HTTPError, StatusResponse, RedirectResponse
log = Logger()
class RootResource(
ReadOnlyResourceMixIn, DirectoryPrincipalPropertySearchMixIn,
CalDAVComplianceMixIn, DAVFile
):
"""
A special root resource that contains support checking SACLs
as well as adding responseFilters.
"""
useSacls = False
# Mapping of top-level resource paths to SACLs. If a request path
# starts with any of these, then the list of SACLs are checked. If the
# request path does not start with any of these, then no SACLs are checked.
saclMap = {
"addressbooks": ("addressbook",),
"calendars": ("calendar",),
"directory": ("addressbook",),
"principals": ("addressbook", "calendar"),
"webcal": ("calendar",),
}
# If a top-level resource path starts with any of these, an unauthenticated
# request is redirected to the auth url (config.WebCalendarAuthPath)
authServiceMap = {
"webcal": True,
}
def __init__(self, path, *args, **kwargs):
super(RootResource, self).__init__(path, *args, **kwargs)
if config.EnableSACLs:
self.useSacls = True
self.contentFilters = []
if (
config.EnableResponseCache and
config.Memcached.Pools.Default.ClientEnabled
):
self.responseCache = MemcacheResponseCache(self.fp)
            # These class attributes need to be set up with our memcache
            # notifier
DirectoryPrincipalResource.cacheNotifierFactory = (
MemcacheChangeNotifier
)
else:
self.responseCache = DisabledCache()
if config.ResponseCompression:
from txweb2.filter import gzip
self.contentFilters.append((gzip.gzipfilter, True))
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
# Get the property store from super
deadProperties = (
namedClass(config.RootResourcePropStoreClass)(self)
)
# Wrap the property store in a memory store
if isinstance(deadProperties, xattrPropertyStore):
deadProperties = CachingPropertyStore(deadProperties)
self._dead_properties = deadProperties
return self._dead_properties
def defaultAccessControlList(self):
return succeed(config.RootResourceACL)
@inlineCallbacks
def checkSACL(self, request):
"""
Check SACLs against the current request
"""
topLevel = request.path.strip("/").split("/")[0]
saclServices = self.saclMap.get(topLevel, None)
if not saclServices:
returnValue(True)
try:
authnUser, authzUser = yield self.authenticate(request)
except Exception:
response = (yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
))
raise HTTPError(response)
# SACLs are enabled in the plist, but there may not actually
# be a SACL group assigned to this service. Let's see if
# unauthenticated users are allowed by calling CheckSACL
# with an empty string.
if authzUser is None:
for saclService in saclServices:
if checkSACL("", saclService):
# No group actually exists for this SACL, so allow
# unauthenticated access
returnValue(True)
# There is a SACL group for at least one of the SACLs, so no
# unauthenticated access
response = (yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
))
log.info("Unauthenticated user denied by SACLs")
raise HTTPError(response)
# Cache the authentication details
request.authnUser = authnUser
request.authzUser = authzUser
# Figure out the "username" from the davxml.Principal object
username = authzUser.record.shortNames[0]
access = False
for saclService in saclServices:
if checkSACL(username, saclService):
# Access is allowed
access = True
break
# Mark SACLs as having been checked so we can avoid doing it
# multiple times
request.checkedSACL = True
if access:
returnValue(True)
log.warn(
"User {user!r} is not enabled with the {sacl!r} SACL(s)",
user=username, sacl=saclServices
)
raise HTTPError(responsecode.FORBIDDEN)
@inlineCallbacks
def locateChild(self, request, segments):
for filter in self.contentFilters:
request.addResponseFilter(filter[0], atEnd=filter[1])
# Examine cookies for wiki auth token; if there, ask the paired wiki
# server for the corresponding record name. If that maps to a
# principal, assign that to authnuser.
# Also, certain non-browser clients send along the wiki auth token
# sometimes, so we now also look for the presence of x-requested-with
# header that the webclient sends. However, in the case of a GET on
# /webcal that header won't be sent so therefore we allow wiki auth
# for any path in the authServiceMap even if that header is missing.
allowWikiAuth = False
topLevel = request.path.strip("/").split("/")[0]
if self.authServiceMap.get(topLevel, False):
allowWikiAuth = True
if not hasattr(request, "checkedWiki"):
# Only do this once per request
request.checkedWiki = True
wikiConfig = config.Authentication.Wiki
cookies = request.headers.getHeader("cookie")
requestedWith = request.headers.hasHeader("x-requested-with")
if (
wikiConfig["Enabled"] and
(requestedWith or allowWikiAuth) and
cookies is not None
):
for cookie in cookies:
if cookie.name == wikiConfig["Cookie"]:
token = cookie.value
break
else:
token = None
if token is not None and token != "unauthenticated":
log.debug(
"Wiki sessionID cookie value: {token}", token=token
)
record = None
try:
uid = yield uidForAuthToken(token)
if uid == "unauthenticated":
uid = None
except WebError as w:
uid = None
                        # NOT_FOUND status means it's an unknown token
if int(w.status) == responsecode.NOT_FOUND:
log.debug(
"Unknown wiki token: {token}", token=token
)
else:
log.error(
"Failed to look up wiki token {token}: "
"{message}",
token=token, message=w.message
)
except Exception as e:
log.error(
"Failed to look up wiki token: {error}",
error=e
)
uid = None
if uid is not None:
log.debug(
"Wiki lookup returned uid: {uid}", uid=uid
)
principal = yield self.principalForUID(request, uid)
if principal:
                            log.debug(
                                "Wiki-authenticated principal {uid} "
                                "being assigned to authnUser and authzUser",
                                uid=uid
                            )
request.authzUser = request.authnUser = principal
if not hasattr(request, "authzUser") and config.WebCalendarAuthPath:
topLevel = request.path.strip("/").split("/")[0]
if self.authServiceMap.get(topLevel, False):
# We've not been authenticated and the auth service is enabled
# for this resource, so redirect.
# Use config.ServerHostName if no x-forwarded-host header,
# otherwise use the final hostname in x-forwarded-host.
host = request.headers.getRawHeaders(
"x-forwarded-host",
[config.ServerHostName]
)[-1].split(",")[-1].strip()
port = 443 if config.EnableSSL else 80
scheme = "https" if config.EnableSSL else "http"
response = RedirectResponse(
request.unparseURL(
host=host,
port=port,
scheme=scheme,
path=config.WebCalendarAuthPath,
querystring="redirect={}://{}{}".format(
scheme,
host,
request.path
)
),
temporary=True
)
raise HTTPError(response)
# We don't want the /inbox resource to pay attention to SACLs because
# we just want it to use the hard-coded ACL for the imip reply user.
# The /timezones resource is used by the wiki web calendar, so open
# up that resource.
if segments[0] in ("inbox", "timezones"):
request.checkedSACL = True
elif (
(
len(segments) > 2 and
segments[0] in ("calendars", "principals") and
(
segments[1] == "wikis" or
(
segments[1] == "__uids__" and
segments[2].startswith(WikiDirectoryService.uidPrefix)
)
)
)
):
# This is a wiki-related calendar resource. SACLs are not checked.
request.checkedSACL = True
# The authzuser value is set to that of the wiki principal if
# not already set.
if not hasattr(request, "authzUser") and segments[2]:
wikiUid = None
if segments[1] == "wikis":
wikiUid = "{}{}".format(WikiDirectoryService.uidPrefix, segments[2])
else:
wikiUid = segments[2]
if wikiUid:
log.debug(
"Wiki principal {name} being assigned to authzUser",
name=wikiUid
)
request.authzUser = yield self.principalForUID(request, wikiUid)
elif (
self.useSacls and
not hasattr(request, "checkedSACL")
):
yield self.checkSACL(request)
if config.RejectClients:
#
# Filter out unsupported clients
#
agent = request.headers.getHeader("user-agent")
if agent is not None:
for reject in config.RejectClients:
if reject.search(agent) is not None:
log.info("Rejecting user-agent: {agent}", agent=agent)
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
"Your client software ({}) is not allowed to "
"access this service."
.format(agent)
))
if not hasattr(request, "authzUser"):
try:
authnUser, authzUser = yield self.authenticate(request)
request.authnUser = authnUser
request.authzUser = authzUser
except (UnauthorizedLogin, LoginFailed):
response = yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
)
raise HTTPError(response)
if (
config.EnableResponseCache and
request.method == "PROPFIND" and
not getattr(request, "notInCache", False) and
len(segments) > 1
):
try:
if not getattr(request, "checkingCache", False):
request.checkingCache = True
response = yield self.responseCache.getResponseForRequest(
request
)
if response is None:
request.notInCache = True
raise KeyError("Not found in cache.")
returnValue((_CachedResponseResource(response), []))
except KeyError:
pass
child = yield super(RootResource, self).locateChild(
request, segments
)
returnValue(child)
@inlineCallbacks
def principalForUID(self, request, uid):
principal = None
directory = request.site.resource.getDirectory()
record = yield directory.recordWithUID(uid)
if record is not None:
username = record.shortNames[0]
log.debug(
"Wiki user record for user {user}: {record}",
user=username, record=record
)
for collection in self.principalCollections():
principal = yield collection.principalForRecord(record)
if principal is not None:
break
returnValue(principal)
def http_COPY(self, request):
return responsecode.FORBIDDEN
def http_MOVE(self, request):
return responsecode.FORBIDDEN
def http_DELETE(self, request):
return responsecode.FORBIDDEN
| apache-2.0 | -7,186,751,252,213,585,000 | 36.281465 | 88 | 0.556776 | false |
infoelliex/odoo-saas-tools | saas_utils/connector.py | 13 | 1300 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010, 2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
def call(dbname, model, method, *args, **kwargs):
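    """Execute ``method`` of ``model`` in database ``dbname`` as the superuser.
    Illustrative sketch only (the model and method names below are assumptions,
    not part of this module): call('mydb', 'res.users', 'search', [])
    """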
instance = openerp.registry(dbname)
with instance.cursor() as cr:
obj = instance.get(model)
if hasattr(obj, method):
return getattr(obj, method)(cr, SUPERUSER_ID, *args, **kwargs)
| lgpl-3.0 | -472,756,408,337,173,400 | 42.333333 | 78 | 0.613077 | false |
quark-mcu/qm-bootloader | tools/sysupdate/qmfmlib/qfu.py | 2 | 14462 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Quark Microcontroller Firmware Update Module
This module provides classes to create and manipulate firmware images for Quark
Microcontrollers."""
from __future__ import print_function, division, absolute_import
import re
import struct
import hashlib
import hmac
_ENDIAN = "<" # Defines the endian for struct packing. ('<'=little, '>'=big)
# The possible types of extended header.
_QFU_EXT_HDR_NONE = 0
_QFU_EXT_HDR_SHA256 = 1
_QFU_EXT_HDR_HMAC256 = 2
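# A minimal usage sketch (file names are illustrative; the header file is
# assumed to define the QFU_* macros, including QFU_BLOCK_SIZE):
#     header = QFUHeader()
#     with open("qfu_config.h") as conf:
#         header.set_from_file(conf)
#     with open("app.bin", "rb") as binary:
#         image = QFUImage().make(header, binary.read(), add_sha256=True)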
class QFUException(Exception):
"""QFU Exception."""
def __init__(self, message):
super(QFUException, self).__init__(message)
class QFUDefineParser(object):
"""A simple parser for C header files to extract #define of integers
Note:
We only parse simple #define macros like::
#define DFU_ID_VENDOR (0x1200)"""
defines = {}
VENDOR_ID = "QFU_VENDOR_ID"
PRODUCT_ID_DFU = "QFU_DFU_PRODUCT_ID"
PRODUCT_ID_APP = "QFU_APP_PRODUCT_ID"
VERSION = "QFU_VERSION"
BLOCK_SIZE = "QFU_BLOCK_SIZE"
SVN = "QFU_SVN"
# Compiled regular expressions for `#define A int` or `#define A (int)`
_re_int_line = re.compile(
r"^\s*\#define\s+(\S+)\s+\(?(\d+)\)?")
# Compiled regular expressions for `#define A hex` or `#define A (hex)`
_re_hex_line = re.compile(
r"^\s*\#define\s+(\S+)\s+\(?0x([0-9,a-f,A-F]+)\)?")
def _check_line(self, line):
"""Search for valid defines in a line."""
match = self._re_hex_line.match(line)
if match:
grp = match.groups()
self.defines[grp[0]] = int(grp[1], 16)
return
match = self._re_int_line.match(line)
if match:
grp = match.groups()
self.defines[grp[0]] = int(grp[1])
return
def __init__(self, open_file):
"""Opens and parses a C header like file for integer defines."""
for line in open_file.readlines():
self._check_line(line)
class QFUImage(object):
"""Creates a QFU compatible file from a binary file."""
def __init__(self):
self.ext_headers = []
def make(self, header, image_data, key=None, add_sha256=False):
"""Assembles the QFU Header and the binary data.
Args:
header (QFUHeader): Header containing all relevant information to
create the image.
image_data (string): Input file data.
            key (string): HMAC-SHA256 signing key; if given (and add_sha256 is
                False), an HMAC256 extended header is added.
            add_sha256 (Bool): Add a sha256 hash to the header.
Returns:
The newly constructed binary data."""
ext_header = QFUExtHeaderNone()
if add_sha256:
ext_header = QFUExtHeaderSHA256(image_data)
elif key:
ext_header = QFUExtHeaderHMAC256(image_data, header, key)
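        # Round up so that the payload and the (base + extended) header each
        # occupy a whole number of blocks.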
data_blocks = ((len(image_data) - 1) // header.block_size) + 1
header_blocks = ((header.SIZE + ext_header.size() - 1)
// header.block_size) + 1
header.num_blocks = data_blocks + header_blocks
header.add_extended_header(ext_header)
# Set QFU header and DFU suffix.
content = header.packed_qfu_header
content += image_data
return content
class QFUExtHeader(object):
"""Generic Extended header class."""
def __init__(self, ext_hdr_id):
self.content = ""
self.hdr_id = ext_hdr_id
def size(self):
"""Return the size of the extended header, which is a minimum of 4"""
return 4
def compute(self):
pass
class QFUExtHeaderNone(QFUExtHeader):
"""None-Extended Header class. This header contains of empty 32 bits."""
def __init__(self):
self._struct = struct.Struct("%sHH" % _ENDIAN)
super(QFUExtHeaderNone, self).__init__(_QFU_EXT_HDR_NONE)
def compute(self):
"""Compute extended header content."""
self.content = self._struct.pack(self.hdr_id, 0)
def size(self):
"""Return the size of the extended header (4 bytes)"""
return super(QFUExtHeaderNone, self).size()
class QFUExtHeaderSHA256(QFUExtHeader):
"""SHA256 extended header class.
Params:
data (`string`): Content of the binary file."""
def __init__(self, file_content):
self.data = file_content
self._struct = struct.Struct("%sHH32s" % _ENDIAN)
super(QFUExtHeaderSHA256, self).__init__(_QFU_EXT_HDR_SHA256)
def compute(self):
"""Compute extended header content."""
if not self.data:
raise QFUException("No data defined for SHA256 calculation.")
hasher = hashlib.sha256()
hasher.update(self.data)
self.content = self._struct.pack(self.hdr_id, 0, hasher.digest())
def size(self):
"""Return the size of the extended hdr (4bytes + 32bytes = 36bytes)"""
return 32 + super(QFUExtHeaderSHA256, self).size()
class QFUExtHeaderHMAC256(QFUExtHeader):
"""HMAC256 extended header class."""
def __init__(self, data, header, key):
self.data = data
self.key = key
self.svn = header.svn
self.header = header
self.data_blocks = ((len(data) - 1) // header.block_size) + 1
super(QFUExtHeaderHMAC256, self).__init__(_QFU_EXT_HDR_HMAC256)
def compute_blocks(self, block_size, block_cnt):
"""Compute the sha checksum for each block.
Args:
block_size (`int`): Size of each block.
block_cnt (`int`): Number of blocks."""
sha_blocks = ""
block_struct = struct.Struct("%s32s" % _ENDIAN)
# Caculate hash for all blocks
nr_blocks = len(self.data) // block_size
start = 0
end = block_size
for i in range(0, nr_blocks):
hasher = hashlib.sha256()
hash_data = self.data[start:end]
hasher.update(hash_data)
sha_blocks += block_struct.pack(hasher.digest())
start += block_size
end += block_size
        # Handle the last block, if present.
if(start < len(self.data)):
hasher = hashlib.sha256()
hash_data = self.data[start:len(self.data)]
hasher.update(hash_data)
sha_blocks += block_struct.pack(hasher.digest())
return sha_blocks
def compute(self):
"""Compute extended header content."""
header_struct = struct.Struct("%sHHI" % _ENDIAN)
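        # Fixed part of the HMAC256 extended header: id (uint16), a zero pad
        # field (uint16) and the security version number (uint32).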
if not self.data:
raise QFUException("No data defined for SHA256 calculation.")
if not self.key:
raise QFUException("No key defined for HMAC256 calculation.")
# if not self.svn:
# raise QFUException("No Security version number defined.")
self.content = header_struct.pack(self.hdr_id, 0, self.svn)
self.content += self.compute_blocks(self.header.block_size,
self.header.num_blocks)
# Sign the header
self.content += hmac.new(bytes(self.key),
(bytes(self.header.get_base_header()) +
bytes(self.content)),
digestmod = hashlib.sha256).digest()
def size(self):
"""Return the size of the extended header 4 bytes as usal + 4 bytes SVN
+ sha256 for each block + final hmac256."""
return (4 + (self.data_blocks * 32) + 32 +
super(QFUExtHeaderHMAC256, self).size())
class QFUHeader(object):
"""The class holding QFU Header and DFU Suffix information
Attributes:
id_vendor (int): The DFU/USB vendor id.
id_product (int): The DFU/USB product id.
id_product_dfu (int): The DFU specific product id.
partition_id (int): Target partition number.
version (int): Firmware version of this image.
block_size (int): The DFU block size.
num_blocks (int): The number of blocks in this image.
ext_headers(`list`): List of extended headers.
"""
SIZE = 20
id_vendor = 0
id_product = 0
id_product_dfu = 0
partition_id = 0
version = 0
block_size = None
num_blocks = 0
ext_headers = []
svn = 0
# Different structure formats. _ENDIAN defines little or big-endian.
# H stands for uint16, I for uint32 and c for a single character.
_header_struct = struct.Struct("%sHHHHIHH" % _ENDIAN)
def __init__(self):
self.ext_headers = []
def add_extended_header(self, header):
"""Add an extended header.
Args:
header (`QFUExtHeader`): extended header."""
self.ext_headers.insert(-1, header)
def print_info(self, prefix=""):
"""Prints verbose QFU Header and information."""
inset = " " * len(prefix)
print("%sQFU-Header content:" % prefix)
print("%s Partition: %d" % (inset, self.partition_id))
print("%s VID: 0x%04x" % (inset, self.id_vendor))
print("%s PID: 0x%04x" % (inset, self.id_product))
print("%s DFU PID: 0x%04x" % (inset, self.id_product_dfu))
print("%s Version: %d" % (inset, self.version))
print("%s Block Size: %d" % (inset, self.block_size))
print("%s Blocks: %d" % (inset, self.num_blocks))
def overwrite_config_parameters(self, args):
"""Read arguments from the command line and overwrites the config
parameters
Args:
args: Command-line arguments.
"""
if args.vid is not None:
self.id_vendor = args.vid
if args.app_pid is not None:
self.id_product = args.app_pid
if args.app_version is not None:
self.version = args.app_version
if args.block_size is not None:
self.block_size = args.block_size
if args.svn is not None:
self.svn = args.svn
if args.dfu_pid is not None:
self.id_product_dfu = args.dfu_pid
if self.block_size is None:
if args.soc == "quark_se":
self.block_size = 4096
else:
self.block_size = 2048
def set_from_file(self, open_file):
"""Read configuration file (C-header format) and update header
information.
Args:
open_file (file): An open file with read permission. The file
needs to contain C-header style defines."""
conf = QFUDefineParser(open_file)
# Map configuration to class variables.
if QFUDefineParser.VENDOR_ID in conf.defines:
self.id_vendor = conf.defines[QFUDefineParser.VENDOR_ID]
if QFUDefineParser.PRODUCT_ID_APP in conf.defines:
self.id_product = conf.defines[QFUDefineParser.PRODUCT_ID_APP]
if QFUDefineParser.PRODUCT_ID_DFU in conf.defines:
self.id_product_dfu = conf.defines[QFUDefineParser.PRODUCT_ID_DFU]
if QFUDefineParser.VERSION in conf.defines:
self.version = conf.defines[QFUDefineParser.VERSION]
if QFUDefineParser.BLOCK_SIZE in conf.defines:
self.block_size = conf.defines[QFUDefineParser.BLOCK_SIZE]
if QFUDefineParser.SVN in conf.defines:
self.svn = conf.defines[QFUDefineParser.SVN]
def set_from_data(self, data):
"""Update header information from binary data string.
Args:
data (string): A string containing header packed information."""
if data[:4] != 'QFUH':
raise QFUException("QFUH prefix missing")
data = data[4:]
try:
unpacked_data = self._header_struct.unpack(data)
(
self.id_vendor,
self.id_product,
self.id_product_dfu,
self.partition_id,
self.version,
self.block_size,
self.num_blocks,
) = unpacked_data
except struct.error:
raise QFUException("QFU Header length not valid")
def get_base_header(self):
"""Return the base header."""
return "QFUH" + self._header_struct.pack(*self._pack_header_tuple)
@property
def _pack_header_tuple(self):
"""Tuple containing the header information in a defined order."""
return (
self.id_vendor,
self.id_product,
self.id_product_dfu,
self.partition_id,
self.version,
self.block_size,
self.num_blocks,
)
@property
def packed_qfu_header(self):
"""Binary representation of QFU header."""
ret = self.get_base_header()
# Add extended headers
for header in self.ext_headers:
header.compute()
ret += header.content
# Add padding
ret += b'\x00' * (self.block_size - (len(ret) % self.block_size))
return ret
| bsd-3-clause | 6,907,986,873,129,123,000 | 33.598086 | 79 | 0.599917 | false |
jrwdunham/old-webapp | onlinelinguisticdatabase/controllers/analysis.py | 1 | 52638 | # −*− coding: UTF−8 −*−
# Copyright (C) 2010 Joel Dunham
#
# This file is part of OnlineLinguisticDatabase.
#
# OnlineLinguisticDatabase is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OnlineLinguisticDatabase is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OnlineLinguisticDatabase. If not, see
# <http://www.gnu.org/licenses/>.
import logging
import subprocess
import os
import codecs
import re
import pickle
import urllib
import string
from datetime import datetime
from random import shuffle
try:
import json
except ImportError:
import simplejson as json
from pylons import config, request, response, session, app_globals, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from pylons.decorators.rest import restrict
from form import SearchFormForm
from onlinelinguisticdatabase.lib.base import BaseController, render
from onlinelinguisticdatabase.lib.analysisObjects import Phonology
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.lib.myworker import worker_q
import onlinelinguisticdatabase.model as model
import onlinelinguisticdatabase.model.meta as meta
from sqlalchemy.sql import not_
log = logging.getLogger(__name__)
delim = app_globals.morphDelimiters
phonologyFileName = u'phonology.foma'
phonologyRegexFileName = u'phonology_regex.foma'
phonologyBinaryFileName = u'phonology.foma.bin'
compilePhonologyFileName = u'compilephonology.sh'
saveStackPhonologyFileName = u'savestackphonology.sh'
morphotacticsFileName = u'morphotactics.foma'
morphophonologyFileName = u'morphophonology.foma'
morphophonologyBinaryFileName = u'morphophonology.foma.bin'
compileMorphophonologyFileName = u'compilemorphophonology.sh'
probabilityCalculatorFileName = u'probabilityCalculator.pickle'
lexiconFileName = u'lexicon.txt'
analyzedWordsFileName = u'analyzed_words.txt'
orthographicVariationFileName = u'orthographicvariation.foma'
orthographicVariationBinaryFileName = u'orthographicvariation.foma.bin'
analysisDataDir = config['app_conf']['analysis_data']
def whereis(program):
log.debug('Here is the request environ: %s' % request.environ.get('PATH', ''))
log.debug('Here is the os environ: %s' % os.environ.get('PATH', ''))
for path in request.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and \
not os.path.isdir(os.path.join(path, program)):
return os.path.join(path, program)
return None
def prod(l):
return reduce(lambda x, y: x * y, l)
def checkRequirements():
try:
parserFiles = os.listdir(analysisDataDir)
except OSError:
parserFiles = []
c.phonologyExists = phonologyFileName in parserFiles
c.morphotacticsExists = morphotacticsFileName in parserFiles
c.morphophonologyExists = morphophonologyFileName in parserFiles
c.morphophonologyBinaryExists = morphophonologyBinaryFileName in parserFiles
c.probabilityCalculatorExists = probabilityCalculatorFileName in parserFiles
c.fomaIsInstalled = whereis('foma')
c.flookupIsInstalled = whereis('flookup')
def generateBinaryFomaFSTFile():
fstSourceFileName = 'phonology.foma'
fstSourceFilePath = os.path.join(analysisDataDir, fstSourceFileName)
fstBinaryFileName = 'phon_bin.foma'
fstBinaryFilePath = os.path.join(analysisDataDir, fstBinaryFileName)
process = subprocess.Popen(
['foma'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
msg = 'source %s\nsave stack %s\n' % (fstSourceFilePath, fstBinaryFilePath)
result = process.communicate(msg)[0]
def getParsesFromFoma(word):
"""Use flookup and the morphophonology FST to get a list of possible parses
for the word.
"""
word = u'#%s#' % word
morphophonologyBinaryFilePath = os.path.join(
analysisDataDir, morphophonologyBinaryFileName)
process = subprocess.Popen(
['flookup', '-x', morphophonologyBinaryFilePath],
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
process.stdin.write(word.encode('utf-8'))
result = unicode(process.communicate()[0], 'utf-8').split('\n')
return [x[1:-1] for x in result if x]
def getOrthographicVariants(word, limit=20):
"""Use flookup and the orthographicvariation FST to get possible alternate
spellings/transcriptions of the word. Return these ranked by their minimum
edit distance from the word.
"""
print '\n\n\nTRYING TO GET VARIANTS FOR: %s\n\n\n' % word
# Check to see if we have the orthographic variation FST file
if orthographicVariationBinaryFileName not in os.listdir(analysisDataDir):
return []
# Check to see if the nltk module is installed
try:
from nltk.metrics import edit_distance
except ImportError:
return []
# Get variants from flookup
word = u'#%s#' % word
orthographicVariationBinaryFilePath = os.path.join(
analysisDataDir, orthographicVariationBinaryFileName)
process = subprocess.Popen(
['flookup', '-x', '-i', orthographicVariationBinaryFilePath],
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
process.stdin.write(word.encode('utf-8'))
result = unicode(process.communicate()[0], 'utf-8').split('\n')
#print 'Number of results from flookup: %d' % len(result)
# Remove results that are too long or too short
margin = 2
if len(result) > 1000:
margin = 1
    result = [x for x in result
              if len(x) < len(word) + margin and len(x) > len(word) - margin]
#print 'Number of results needing edit distancing: %d' % len(result)
# Sort variants by minimum edit distance
result = [(x, edit_distance(word, x)) for x in result]
result.sort(key=lambda x: x[1])
# Take only the top <limit> # of results
result = result[:limit]
# Remove the first result if it has a MED of 0
if result[0][1] == 0:
result = result[1:]
result = [x[0][1:-1] for x in result if x] # Remove hash symbols
return result
def applyFomaPhonology(i, dir):
i = u'#%s#' % i
phonologyBinaryFilePath = os.path.join(
analysisDataDir, phonologyBinaryFileName)
cmdList = ['flookup', '-x', phonologyBinaryFilePath]
if dir == 'inverse':
cmdList = ['flookup', '-x', '-i', phonologyBinaryFilePath]
process = subprocess.Popen(
cmdList,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
process.stdin.write(i.encode('utf-8'))
result = unicode(process.communicate()[0], 'utf-8').split('\n')
return [x[1:-1] for x in result if x]
def getFormsWithAnalyzedWords():
"""Get all Forms with non-empty mb, mg and scs fields such that all of
these fields, as well as the tr field, have the same number of "words".
"""
forms = meta.Session.query(model.Form).filter(
model.Form.morphemeBreak != u'').filter(
model.Form.morphemeGloss != u'').filter(
model.Form.syntacticCategoryString != u'').all()
# Get all forms whose mb, mg and scs fields have an equal number of
# words
forms = [form for form in forms if
len(form.transcription.split()) ==
len(form.morphemeBreak.split()) ==
len(form.morphemeGloss.split()) ==
len(form.syntacticCategoryString.split())]
return forms
def getAnalyzedWords(forms, lexCats, delim):
"""Return all of the analyzed word tokens present in forms. The criteria
for an analyzed word is that it be analyzed as containing only morphemes
whose categories are from the set of lexical categories given in lexCats.
"""
def isAGoodAnalyzedWord(word):
catList = re.split('|'.join(delim), word[3])
return not set(catList) - set(lexCats)
result = []
for f in forms:
tr = f.transcription.split()
mb = f.morphemeBreak.split()
mg = f.morphemeGloss.split()
sc = f.syntacticCategoryString.split()
words = [x for x in zip(tr, mb, mg, sc) if isAGoodAnalyzedWord(x)]
result += words
return result
def getAllSyncatWordStrings(delim):
"""Returns the set of all syntactic category word strings in the
database. E.g., ['Agr-N', 'Agr-V', ...].
"""
def syncatStringIsGood(syncatString, delim):
# Filters out syntactic category strings containing the category'?'
return '?' not in re.split('|'.join(delim), syncatString)
forms = getFormsWithAnalyzedWords()
syncatStrings = [form.syntacticCategoryString for form in forms]
syncatStringWords = {}
for s in syncatStrings:
for sw in s.split():
if syncatStringIsGood(sw, delim):
syncatStringWords[sw] = 0
return syncatStringWords.keys()
def getLexicalItems(delim):
"""Returns the set of Forms in the database that represent lexical
items. The system assumes lexical items to be those that lack
's', 'S', 'sentence', 'sent' or 'Sentence' as their syntactic
category AND lack spaces in their transcription fields.
"""
def isAValidLexicalItem(f):
        nonLexicalCategories = ['s', 'S', 'sentence', 'sent', 'Sentence']
delimiters = re.compile(u'|'.join(delim))
if f.syntacticCategory and \
(f.syntacticCategory.name not in nonLexicalCategories) and \
not delimiters.search(f.morphemeGloss):
return True
else:
return False
forms = meta.Session.query(model.Form).filter(not_(
model.Form.transcription.like(u'% %'))).all()
forms = [f for f in forms if isAValidLexicalItem(f)]
lexicalItems = {}
for form in forms:
t = form.transcription
key = form.syntacticCategory.name
if len(key) < 2:
key = '%sCategory' % key
if form.morphemeGloss:
g = form.morphemeGloss.replace(u' ', u'.')
else:
g = form.glosses[0].gloss.replace(u' ', u'.')
f = (t, g)
try:
lexicalItems[key].append(f)
except KeyError:
lexicalItems[key] = [f]
return lexicalItems
def getLexicalItemsFromFile(delim):
"""Tries to get the lexical items from the lexicon.txt file. If that fails,
it returns the result of getLexicalItems (which gets the lexical items from
the db.)
"""
lexiconFilePath = os.path.join(analysisDataDir, lexiconFileName)
try:
print 'Getting lexical items from file.'
lexiconFile = codecs.open(lexiconFilePath, 'r', 'utf-8')
lexicalItems = {}
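        # Expected lexicon.txt format: a line starting with '#' names a
        # category; the lines that follow hold 'transcription gloss' pairs.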
for line in lexiconFile:
if line[0] == u'#':
key = line[1:-1]
lexicalItems[key] = []
elif line != u'\n':
value = tuple(line[:-1].split())
lexicalItems[key].append(value)
return lexicalItems
except IOError:
print 'Getting lexical items from database.'
return getLexicalItems(delim)
# Foma reserved symbols. See
# http://code.google.com/p/foma/wiki/RegularExpressionReference#Reserved_symbols
fomaReserved = [u'\u0021', u'\u0022', u'\u0023', u'\u0024', u'\u0025', u'\u0026',
u'\u0028', u'\u0029', u'\u002A', u'\u002B', u'\u002C', u'\u002D', u'\u002E',
u'\u002F', u'\u0030', u'\u003A', u'\u003B', u'\u003C', u'\u003E', u'\u003F',
u'\u005B', u'\u005C', u'\u005D', u'\u005E', u'\u005F', u'\u0060', u'\u007B',
u'\u007C', u'\u007D', u'\u007E', u'\u00AC', u'\u00B9', u'\u00D7', u'\u03A3',
u'\u03B5', u'\u207B', u'\u2081', u'\u2082', u'\u2192', u'\u2194', u'\u2200',
u'\u2203', u'\u2205', u'\u2208', u'\u2218', u'\u2225', u'\u2227', u'\u2228',
u'\u2229', u'\u222A', u'\u2264', u'\u2265', u'\u227A', u'\u227B']
def escapeFomaReserved(i):
def escape(ii):
if ii in fomaReserved:
ii = u'%' + ii
return ii
return ''.join([escape(x) for x in i])
def getFomaMorphotacticsFile(syncatWordStrings, lexicalItems, delim):
"""Returns a string consisting of a series of foma statements that
define a morphotactic FST.
Because of some quirks inherent to the foma script parser, long
define statements need to be broken up into shorter ones and then
concatenated via disjunction. Hence the ugly code below.
"""
patt = re.compile('(%s)' % '|'.join(delim))
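    # Wrap each morpheme delimiter in double quotes so foma treats it as a
    # literal symbol in the regular expressions built below.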
tmp = [patt.sub(' "\\1" ', s) for s in syncatWordStrings]
fomaMorphotacticsDefinition = {}
c = 1
for i in range(len(tmp)):
word = tmp[i]
if (i % 100 == 99) or (i == len(tmp) - 1):
tmp2 += ' | \n(%s);' % word
fomaMorphotacticsDefinition[varName] = tmp2
elif i % 100 == 0:
varName = u'morphotactics%d' % c
tmp2 = '(%s)' % word
c += 1
else:
tmp2 += ' | \n(%s)' % word
if len(fomaMorphotacticsDefinition) > 1:
subs = []
subNames = fomaMorphotacticsDefinition.keys()
for k in fomaMorphotacticsDefinition:
definition = 'define %s %s' % (k,
fomaMorphotacticsDefinition[k])
subs.append(definition)
fomaMorphotacticsDefinition = u'\n\n'.join(subs) + \
u'\n\ndefine morphotactics "#" (%s) "#";' % ' | \n'.join(subNames)
else:
fomaMorphotacticsDefinition = u'define morphotactics "#" (%s) "#"' % (
fomaMorphotacticsDefinition.values()[0])
def getLexicalItemDefinition(label, forms):
def regexify(i):
"""Returns the string formatted for a foma regular expression. That
is, characters separated by a space and reserved characters escaped.
"""
#return ' '.join(['"%s"' % z for z in i])
return escapeFomaReserved(' '.join(list(i)))
if len(forms) < 100:
return u'define %s [%s];\n\n' % (label,
'|\n '.join(
[regexify(x[0]) + '"|%s":0' % x[1] for x in forms]))
else:
lexicalItemDefinition = {}
c = 1
tmp = u''
for i in range(len(forms)):
form = forms[i]
if (i % 100 == 99) or (i == len(forms) - 1):
tmp += '|\n %s"|%s":0];' % (regexify(form[0]), form[1])
lexicalItemDefinition[varName] = tmp
elif i % 100 == 0:
varName = u'%s%d' % (label, c)
tmp = '[%s"|%s":0' % (regexify(form[0]), form[1])
c += 1
else:
tmp += '|\n %s"|%s":0' % (regexify(form[0]), form[1])
subs = []
subNames = lexicalItemDefinition.keys()
for k in lexicalItemDefinition:
definition = 'define %s %s' % (k,
lexicalItemDefinition[k])
subs.append(definition)
lexicalItemDefinition = u'\n\n'.join(subs) + \
u'\n\ndefine %s %s;\n\n' % (label, ' | '.join(subNames))
return lexicalItemDefinition
fomaLexicalItemsDefinitions = u''
for li in lexicalItems:
label = li
forms = lexicalItems[li]
fomaLexicalItemsDefinitions += getLexicalItemDefinition(label,
forms)
return u'%s\n\n%s' % (fomaLexicalItemsDefinitions,
fomaMorphotacticsDefinition)
def getNGramCounts(analyzedWords, lexItms):
"""Returns a tuple (unigrams, bigrams) where each is a dict from a unigram/
bigram to a count of its occurrences in the database.
"""
unigrams = {}
bigrams = {}
# Increment the value of a key in a dict; if key doesn't exist, assign value 1
def incrementDict(_dict, key):
try:
_dict[key] += 1
except KeyError:
_dict[key] = 1
# Count the unigrams & bigrams from the analyzed words
for w in analyzedWords:
mb = re.split(u'|'.join(delim), w[1])
mg = re.split(u'|'.join(delim), w[2])
bgMorphemes = zip(mb, mg)
incrementDict(unigrams, u'<l>')
incrementDict(unigrams, u'<r>')
for i in range(len(bgMorphemes)):
m = bgMorphemes[i]
m = u'%s|%s' % (m[0], m[1])
incrementDict(unigrams, m)
if i == 0:
incrementDict(bigrams, (u'<l>', m))
else:
mPrev = bgMorphemes[i - 1]
mPrev = u'%s|%s' % (mPrev[0], mPrev[1])
incrementDict(bigrams, (mPrev, m))
if i == (len(bgMorphemes) - 1):
incrementDict(bigrams, (m, u'<r>'))
# Upate the unigram counts with the lexical items (count = 0)
for syncat in lexItms:
liList = lexItms[syncat]
for li in liList:
li = '|'.join(li)
if li not in unigrams:
unigrams[li] = 0
return (unigrams, bigrams)
class ProbabilityCalculator(object):
def __init__(self, unigrams, bigrams, delim):
self.unigrams = unigrams
self.bigrams = bigrams
self.delim = delim
self.N = len(unigrams)
self.bigram2probability = {}
def getBigramsFromAnalysis(self, analysis):
"""Analysis is a string like u'chien|dog-s|PL'. On this string, this
function would return
[(u'<l>', u'chien|dog'), (u'chien|dog', u's|PL'), (u's|PL', u'<r>')]
"""
tmp = [u'<l>'] + re.split('|'.join(self.delim), analysis) + [u'<r>']
result = []
for i in range(len(tmp) - 1):
result.append((tmp[i], tmp[i + 1]))
return result
def getProbOfBigram(self, bigram, analysis=None):
try:
result = self.bigram2probability[bigram]
except KeyError:
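            # Add-one (Laplace) smoothing: (bigram count + 1) divided by
            # (count of the first morpheme + vocabulary size).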
numerator = self.bigrams.get(bigram)
if numerator:
numerator += 1
else:
numerator = 1
try:
denominator = self.unigrams[bigram[0]] + self.N
probability = numerator / float(denominator)
except KeyError:
print 'ERROR: could not find count for %s' % bigram[0]
if analysis:
print 'the analysis variable is not None'
probability = 0.00000000001
self.bigram2probability[bigram] = probability
result = probability
return result
def getProbability(self, analysis):
anaBigrams = self.getBigramsFromAnalysis(analysis)
probs = [self.getProbOfBigram(b, analysis) for b in anaBigrams]
result = prod(probs)
return result
def getProbCalc():
"""Try to get a probability calculator object.
"""
try:
probCalc = app_globals.wordProbabilityCalculator
except AttributeError:
try:
probabilityCalculatorPath = os.path.join(analysisDataDir,
probabilityCalculatorFileName)
probCalcPickleFile = open(probabilityCalculatorPath, 'rb')
probCalc = pickle.load(probCalcPickleFile)
app_globals.wordProbabilityCalculator = probCalc
except IOError:
probCalc = None
return probCalc
def splitBreakFromGloss(analysis):
"""Take something like 'abc|123-def|456=ghi|789' and return
('abc-def=ghi', '123-456=789').
"""
splitter = re.compile('(%s)' % '|'.join(delim))
analysis = splitter.split(analysis)
mb = u''
mg = u''
for i in range(len(analysis)):
try:
if i % 2 == 0:
mb += analysis[i].split('|')[0]
mg += analysis[i].split('|')[1]
else:
mb += analysis[i]
mg += analysis[i]
except IndexError:
print 'Warning: unable to split %s, the %d element of %s' % (
analysis[i], i, str(analysis))
return (mb, mg)
def getTrainingAndTestSets(analyzedWords):
mark = int(len(analyzedWords) * 0.9)
shuffle(analyzedWords)
return (analyzedWords[:mark], analyzedWords[mark:])
def analyzeAccentation():
"""
- gets each analyzed word form analyzed_words.txt
- phonologizes the morpheme break line using phonology.foma (apply down)
- checks whether the accentation of the phonologized mb matches that of
the transcription
- prints a report ('prominence_output.txt') with counts, percentages and
lists the non-matching cases sorted by syllable count and,
secondarily, prominence location
"""
def getProminentSyllables(word):
"""Return a tuple of the form (x, y) where y is the number of syllables
in word and x is a tuple representing the location of the prominent
syllables in word. E.g., x = () means 'no prominences', x = (1,) means
'prominence on syllable 1 only' and x = (2, 4) means 'prominence on
        syllables 2 and 4'.
"""
word = u'#%s#' % word
unaccentedVowels = [u'a', u'i', u'o', u'u', u'e', u'A', u'I', u'O', u'U']
accentedVowels = [u'a\u0301', u'i\u0301', u'o\u0301',
u'\u00c1', u'\u00ED', u'\u00F2', u'\u00CD', u'\u00D3', u'\u00E1',
u'\u00F3']
vowels = unaccentedVowels + accentedVowels
patt = u'([%s]+)' % u'|'.join(vowels)
patt = re.compile(patt)
wordList = patt.split(word)
prominentIndices = []
for i in range(len(wordList)):
syllPart = wordList[i]
if set(list(syllPart)) & set(accentedVowels):
prominentIndices.append(i)
prominentSyllables = tuple([(x / 2) + 1 for x in prominentIndices])
numSyllables = len(wordList) / 2
return (prominentSyllables, numSyllables)
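    # Shape illustration (hypothetical word, for orientation only): a
    # two-syllable transcription with an accent on its first vowel comes back
    # from getProminentSyllables() as ((1,), 2); a word with no accented
    # vowels gives ((), n) where n is its syllable count.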
# Get the (unique) analyzed words
analyzedWordsPath = os.path.join(analysisDataDir, analyzedWordsFileName)
f = codecs.open(analyzedWordsPath, 'r', 'utf-8')
lines = f.readlines()
lines = list(set(lines))
syllCount2Form = {}
output = u''
prominenceIsGood = 0
prominenceIsBad = 0
spacer = '#' * 80
spacer2 = '_' * 80
spacer3 = '+' * 80
outputFile = codecs.open(
os.path.join(analysisDataDir, u'prominence_output.txt'),
'w', 'utf-8')
# Get those analyzed words whose prominence pattern is not as expected and
# sort them by number of syllables and location of prominence(s).
i = 1
for line in lines:
print '%d/%d' % (i, len(lines))
i += 1
line = line.split()
tr = removeWordFinalPunctuation(line[0])
prominentSyllables, numSyllables = getProminentSyllables(tr)
mbPhonologized = applyFomaPhonology(line[1], 'inverse')
mbPromSyllsNumSylls = [getProminentSyllables(x) for x in mbPhonologized]
record = (tr, mbPhonologized, line[1])
if (prominentSyllables, numSyllables) not in mbPromSyllsNumSylls:
prominenceIsBad += 1
try:
syllCount2Form[numSyllables][prominentSyllables].append(record)
except KeyError:
if numSyllables not in syllCount2Form:
syllCount2Form[numSyllables] = {prominentSyllables: [record]}
else:
syllCount2Form[numSyllables][prominentSyllables] = [record]
else:
prominenceIsGood += 1
# Write the report of the analysis
output += u'%s\nProminence Analysis of Blackfoot -- Report\n%s\n\n' % (
spacer, spacer)
output += '-%d unique forms analyzed\n' % len(lines)
output += '-%d (%f) had prominence in the expected location\n' % (
prominenceIsGood, prominenceIsGood / float(len(lines)))
output += '-%d (%f) did not have prominence in the expected location\n' % (
prominenceIsBad, prominenceIsBad / float(len(lines)))
output += '\nDetails of forms with unexpected prominence location\n%s\n\n' \
% spacer
for key in sorted(syllCount2Form.keys()):
prom2TokDict = syllCount2Form[key]
output += '\n\n%d-syllable forms (count: %d)\n%s\n' % (
key, sum([len(x) for x in prom2TokDict.values()]), spacer2)
for promLoc in sorted(prom2TokDict.keys()):
recordList = sorted(prom2TokDict[promLoc])
output += '\n\nForms with prominence at syllable(s) %s\n%s\n' % (
', '.join([str(x) for x in promLoc]),
spacer3)
for record in recordList:
output += u'%s\t%s\n' % (record[0], u', '.join(record[1]))
outputFile.write(output)
outputFile.close()
def removeWordFinalPunctuation(word):
punct = ['"', '.', '?', '!', u'\u2019', u'\u201D', u'\u201C',
u'\u2026', u')', u'(', u'*', u'-', u';', u':', u',']
if word[-1] in punct:
word = word[:-1]
return word
def removeExtraneousPunctuation(word):
punctuation = string.punctuation.replace(
"'", "") + u'\u2019\u201D\u201C\u2026'
patt = re.compile('[%s]' % re.escape(punctuation))
return patt.sub('', word)
def saveAnalyzedWordsFile(analyzedWords, fname=None):
if not fname: fname = analyzedWordsFileName
analyzedWordsFilePath = os.path.join(analysisDataDir, fname)
analyzedWordsFile = codecs.open(analyzedWordsFilePath, 'w', 'utf-8')
for w in analyzedWords:
line = u'%s\n' % u' '.join(w)
analyzedWordsFile.write(line)
analyzedWordsFile.close()
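# Round-trip sketch (assumption: each analyzed word is a (transcription,
# morpheme break, morpheme gloss) tuple, matching how the tuples are consumed
# elsewhere in this module); this mirrors how evaluateparser() reads the file.
def _example_read_analyzed_words(path):
    import codecs
    f = codecs.open(path, 'r', 'utf-8')
    words = [tuple(line[:-1].split()) for line in f]
    f.close()
    return words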
class AnalysisController(BaseController):
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def index(self):
checkRequirements()
try:
phonologyFilePath = os.path.join(analysisDataDir, phonologyFileName)
phonologyFile = codecs.open(phonologyFilePath, 'r', 'utf-8')
c.phonology = phonologyFile.read()
phonologyFile.close()
except IOError:
c.phonology = u''
return render('/derived/analysis/index.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def phonology(self):
"""The phonology method simply serves the phonology.html page which
links to the Javascript public/javascript/phonology.js.
All "phonology_"-prefixed methods are Ajax responders.
"""
return render('/derived/analysis/phonology.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def phonology_dependencies_installed(self):
fomaPath = whereis('foma')
flookupPath = whereis('flookup')
log.debug(flookupPath)
if fomaPath and flookupPath:
return json.dumps(True)
return json.dumps(False)
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def phonology_add(self):
"""Performs the following actions:
- saves the phonology metadata to the database (name, description, etc.)
- creates the directory for this phonology:
analysis/phonology/phonology_%d
- writes the foma script file (phonology.foma)
- writes the shell script to compile the foma script
(phonologycompile.sh)
- compiles the phonology script (creates phonology.foma.bin)
"""
# 1. Get values entered by user (validation done in Javascript)
log.debug('Getting user inputs.')
values = urllib.unquote_plus(unicode(request.body, 'utf-8'))
values = json.loads(values)
# 2. Create phonology object and write it to database
log.debug('Writing phonology metadata to database.')
phonology = model.Phonology()
now = datetime.utcnow()
phonology.name = values['name']
phonology.description = values['description']
phonology.script = values['script']
phonology.enterer = meta.Session.query(model.User).get(
int(session['user_id']))
phonology.datetimeEntered = now
phonology.datetimeModified = now
meta.Session.add(phonology)
meta.Session.commit()
# 3. Create directory structure, write foma and foma compile scripts
phonology.setup()
# 4. Compile
phonology.compile()
# 5. Return JSON representation
phonologyInJSON = phonology.getAttributesInJSON()
return phonologyInJSON
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def phonology_edit(self):
"""Performs the following actions:
- updates the phonology metadata in the database
- if script is different:
- writes the new foma script file
- compiles the phonology script
"""
# 1. Get values entered by user (validation done in Javascript)
log.debug('Getting user inputs.')
values = urllib.unquote_plus(unicode(request.body, 'utf-8'))
values = json.loads(values)
phonology = meta.Session.query(model.Phonology).get(int(values['id']))
if phonology.script != values['script']:
log.debug('Script needs re-writing and -compiling.')
# 3. Create directory structure, write foma and foma compile scripts
phonology.script = values['script']
phonology.setup()
# 4. Compile
phonology.compile()
# 2. Create phonology object and write it to database
log.debug('Updating phonology metadata in database.')
now = datetime.utcnow()
phonology.name = values['name']
phonology.description = values['description']
phonology.script = values['script']
phonology.modifier = meta.Session.query(model.User).get(
int(session['user_id']))
phonology.datetimeModified = now
meta.Session.add(phonology)
meta.Session.commit()
# 5. Return JSON representation
phonologyInJSON = phonology.getAttributesInJSON()
return phonologyInJSON
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def phonology_delete(self):
"""Deletes the phonology with the ID provided in the POST stream.
"""
log.debug('Getting phonology to delete.')
id = urllib.unquote_plus(unicode(request.body, 'utf-8'))
id = json.loads(id)
phonology = meta.Session.query(model.Phonology).get(id)
log.debug('Deleting phonology files.')
filesDeleted = phonology.delete()
if filesDeleted:
log.debug('Deleting database entry for phonology.')
meta.Session.delete(phonology)
meta.Session.commit()
return json.dumps(True)
else:
log.debug('Unable to delete files; therefore db entry intact.')
return json.dumps(False)
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def phonology_get(self):
"""Return all of the phonologies.
"""
phonologies = meta.Session.query(model.Phonology).order_by(
model.Phonology.name)
return json.dumps([p.getAttributesAsDict() for p in phonologies])
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def phonology_apply_to_token(self):
log.debug('Phonologizing token provided by user.')
inputs = urllib.unquote_plus(unicode(request.body, 'utf-8'))
inputs = json.loads(inputs)
        token = inputs['token']
ID = inputs['id']
phonology = Phonology()
phonology.setID(ID)
phonology.getFilePaths()
surfaceForms = phonology.phonologize(token)
return json.dumps(surfaceForms)
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def phonology_apply_to_self(self):
log.debug('Phonologizing #test entries in the script.')
inputs = urllib.unquote_plus(unicode(request.body, 'utf-8'))
inputs = json.loads(inputs)
ID = inputs['id']
phonology = Phonology()
phonology.setID(ID)
phonology.getFilePaths()
result = phonology.phonologizeInternalTests()
log.debug('\n'.join([str(r) for r in result]))
return json.dumps(result)
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def analyzedwordcorpus(self):
"""The analyzedwordcorpus method simply serves the
analyzedwordcorpus.html page which links to the Javascript
public/javascript/analyzedwordcorpus.js. All
"analyzedwordcorpus_"-prefixed methods are Ajax responders.
"""
return render('/derived/analysis/analyzedwordcorpus.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def morphotactics(self):
return render('/derived/analysis/morphotactics.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def morphophonology(self):
return render('/derived/analysis/morphophonology.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def probabilitycalculator(self):
return render('/derived/analysis/probabilitycalculator.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def morphologicalparser(self):
return render('/derived/analysis/morphologicalparser.html')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def getparse(self):
"""Returns a list of tuples representing the possible parses of the
input word.
"""
word = unicode(request.body, 'utf-8')
parses = getParsesFromFoma(word)
if parses == [u'']:
return json.dumps([('NO PARSE', 'NO PARSE')])
probCalc = getProbCalc()
if probCalc:
probs = [probCalc.getProbability(p) for p in parses]
result = zip(parses, probs)
result.sort(key=lambda x: x[1])
result.reverse()
result = [x[0] for x in result]
else:
result = parses
result = json.dumps([splitBreakFromGloss(a) for a in result])
return result
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def savephonology(self):
"""Creates the phonology.foma file as well as phonology_regex.foma,
compilephonology.sh and phonology.foma.bin.
"""
phonology = unicode(request.body, 'utf-8')
# 1. Save phonology.foma file
log.debug('Writing phonology.foma.')
phonologyFilePath = os.path.join(analysisDataDir, phonologyFileName)
phonologyFile = codecs.open(phonologyFilePath, 'w', 'utf-8')
phonologyFile.write(phonology)
phonologyFile.close()
# 2. Save phonology_regex.foma file
log.debug('Writing phonology_regex.foma.')
phonologyRegexFilePath = os.path.join(analysisDataDir,
phonologyRegexFileName)
phonologyRegexFile = codecs.open(phonologyRegexFilePath, 'w', 'utf-8')
phonologyRegexFile.write('%s\n\nregex phonology;' % phonology)
phonologyRegexFile.close()
# 3. Write compilephonology.sh
log.debug('Writing compilephonology.sh.')
phonologyBinaryFilePath = os.path.join(analysisDataDir,
phonologyBinaryFileName)
compilePhonologyPath = os.path.join(
analysisDataDir, compilePhonologyFileName)
compilePhonology = open(compilePhonologyPath, 'w')
cmd = 'foma -e "source %s" -e "save stack %s" -e "quit"' % (
phonologyRegexFilePath, phonologyBinaryFilePath)
compilePhonology.write(cmd)
compilePhonology.close()
os.chmod(compilePhonologyPath, 0755)
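        # For orientation, the generated one-line script looks roughly like
        # this (paths are illustrative):
        #   foma -e "source .../phonology_regex.foma" \
        #        -e "save stack .../phonology.foma.bin" -e "quit"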
# 4. Execute compilephonology.sh
log.debug('Generating phonology.foma.bin.')
process = subprocess.Popen([compilePhonologyPath], shell=True,
stdout=subprocess.PIPE)
now = datetime.utcnow().strftime('at %H:%M on %b %d, %Y')
output = unicode(process.communicate()[0], 'utf-8')
success = 'Writing to file %s' % phonologyBinaryFilePath
if success in output:
msg = "phonology script saved and compiled (%s)." % now
return "<span style='color:green;font-weight:bold;'>%s</span>" % msg
else:
msg = "phonology script saved but unable to compile."
return "<span class='warning-message'>%s</span>" % msg
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def generatemorphotactics(self):
"""Writes the morphotactics.foma file representing the morphotactics
of the language.
"""
print 'Getting syncatWordStrings.'
syncatWordStrings = getAllSyncatWordStrings(delim)
print 'Getting lexicalItems.'
lexicalItems = getLexicalItems(delim)
print 'Writing lexicon.txt file.'
lexiconFilePath = os.path.join(analysisDataDir, lexiconFileName)
lexiconFile = codecs.open(lexiconFilePath, 'w', 'utf-8')
lexiconString = u''
for li in sorted(lexicalItems.keys()):
lexiconString += u'#%s' % li
for x in sorted(lexicalItems[li]):
lexiconString += u'\n%s %s' % (x[0], x[1])
lexiconString += u'\n\n'
lexiconFile.write(lexiconString)
lexiconFile.close()
print 'Getting fomaMorphotacticsFile.'
fomaFile = getFomaMorphotacticsFile(
syncatWordStrings, lexicalItems, delim)
morphotacticsFilePath = os.path.join(analysisDataDir,
morphotacticsFileName)
morphotacticsFile = codecs.open(morphotacticsFilePath, 'w', 'utf-8')
morphotacticsFile.write(fomaFile)
morphotacticsFile.close()
print 'Done.'
now = datetime.utcnow().strftime('at %H:%M on %b %d, %Y')
return "morphotactics file saved (%s)." % now
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def generatemorphophonology(self):
"""This method writes the foma binary file morphophonology.foma.bin
representing the morphophonology FST. In order to do so, it also writes
two other files: morphophonology.foma and compilemorphophonology.sh.
The Python subprocess module is then used to run the shell script
compilemorphophonology.sh which runs a series of foma commands that
        result in the creation of the morphophonology.foma.bin file.
"""
morphophonologyBinaryFilePath = os.path.join(
analysisDataDir, morphophonologyBinaryFileName)
# 1. Write morphophonology.foma
print 'Writing morphophonology.foma.'
phonologyFilePath = os.path.join(analysisDataDir, phonologyFileName)
phonologyFile = codecs.open(phonologyFilePath, 'r', 'utf-8')
phonology = phonologyFile.read()
morphotacticsFilePath = os.path.join(analysisDataDir,
morphotacticsFileName)
morphotacticsFile = codecs.open(morphotacticsFilePath, 'r', 'utf-8')
morphotactics = morphotacticsFile.read()
morphophonologyFilePath = os.path.join(analysisDataDir,
morphophonologyFileName)
morphophonologyFile = codecs.open(morphophonologyFilePath, 'w', 'utf-8')
morphophonology = u'%s\n\n\n%s\n\n\n%s\n\n\n%s' % (
morphotactics,
phonology,
'define morphophonology morphotactics .o. phonology;',
'regex morphophonology;'
)
morphophonologyFile.write(morphophonology)
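        # Rough shape of the file just written (contents are illustrative):
        #   <morphotactics definitions>
        #   <phonology definitions>
        #   define morphophonology morphotactics .o. phonology;
        #   regex morphophonology;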
# 2. Write compilemorphophonology.sh
print 'Writing compilemorphophonology.sh.'
compilePath = os.path.join(analysisDataDir,
compileMorphophonologyFileName)
compileFile = open(compilePath, 'w')
cmd = 'foma -e "source %s" -e "save stack %s" -e "quit"' % (
morphophonologyFilePath, morphophonologyBinaryFilePath)
compileFile.write(cmd)
compileFile.close()
os.chmod(compilePath, 0755)
# 3. Execute compilemorphophonology.sh
print 'Generating morphophonology.foma.bin.'
process = subprocess.Popen([compilePath], shell=True,
stdout=subprocess.PIPE)
now = datetime.utcnow().strftime('at %H:%M on %b %d, %Y')
output = unicode(process.communicate()[0], 'utf-8')
print 'Done.'
success = 'Writing to file %s' % morphophonologyBinaryFilePath
if success in output:
msg = "morphophonology binary file generated (%s)." % now
return "<span style='color:green;font-weight:bold;'>%s</span>" % msg
else:
msg = "unable to generate morphophonology binary file."
return "<span class='warning-message'>%s</span>" % msg
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def generateprobabilitycalculator(self):
print 'Getting lexical items.'
lexItms = getLexicalItemsFromFile(delim)
lexCats = lexItms.keys()
print 'Getting Forms with analyzed words.'
forms = getFormsWithAnalyzedWords()
print 'Getting analyzed word tokens.'
analyzedWords = getAnalyzedWords(forms, lexCats, delim)
print 'There are %d analyzed words in the database.' % len(
analyzedWords)
print 'Writing analyzed_words.txt file.'
saveAnalyzedWordsFile(analyzedWords)
print 'Getting ngrams.'
unigrams, bigrams = getNGramCounts(analyzedWords, lexItms)
print 'Getting probabilityCalculator.'
probabilityCalculator = ProbabilityCalculator(unigrams, bigrams, delim)
print 'Updating application globals with probabilityCalculator.'
app_globals.wordProbabilityCalculator = probabilityCalculator
print 'Pickling probabilityCalculator.'
probabilityCalculatorPicklePath = os.path.join(
analysisDataDir, probabilityCalculatorFileName)
probabilityCalculatorPickle = open(
probabilityCalculatorPicklePath, 'wb')
pickle.dump(probabilityCalculator, probabilityCalculatorPickle)
probabilityCalculatorPickle.close()
print 'Done.'
now = datetime.utcnow().strftime('at %H:%M on %b %d, %Y')
msg = 'generated probability calculator (%s)' % now
return "<span style='color:green;font-weight:bold;'>%s</span>" % msg
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def evaluateparser(self):
#analyzeAccentation()
formsFilter = None
formsFilter = 'frantz'
print 'Getting lexical items from file.'
lexItms = getLexicalItemsFromFile(delim)
lexCats = lexItms.keys()
if formsFilter:
print 'Getting Forms with analyzed words.'
forms = getFormsWithAnalyzedWords()
if formsFilter == 'frantz':
badCats = ['vcpx', 'nan', 'nin', 'nar', 'nir', 'vai', 'vii',
'vta', 'vti', 'vrt', 'adt', 'dem', 'prev', 'med',
'fin', 'oth', 'und', 'pro', 'asp', 'ten', 'mod',
'agra', 'agrb', 'thm', 'whq', 'num', 'drt', 'dim',
'o', 'stp', 'PN', 'INT']
forms = [f for f in forms if f.source and
f.source.authorLastName == u'Frantz'
and (not f.syntacticCategory or
f.syntacticCategory.name not in badCats)]
print 'We are analyzing data from %d forms' % len(forms)
print 'Getting analyzed word tokens.'
analyzedWords = getAnalyzedWords(forms, lexCats, delim)
print 'There are %d analyzed words in the database.' % len(
analyzedWords)
print 'Writing analyzed_words_frantz.txt file.'
saveAnalyzedWordsFile(analyzedWords, 'analyzed_words_frantz.txt')
else:
print 'Getting analyzed word tokens from file.'
analyzedWordsFilePath = os.path.join(analysisDataDir,
analyzedWordsFileName)
analyzedWordsFile = codecs.open(analyzedWordsFilePath, 'r', 'utf-8')
analyzedWords = [tuple(x[:-1].split()) for x in analyzedWordsFile]
analyzedWordsFile.close()
print 'Getting training and test sets.'
trainingSet, testSet = getTrainingAndTestSets(analyzedWords)
print 'Getting ngrams from training set.'
unigrams, bigrams = getNGramCounts(trainingSet, lexItms)
print 'Getting probabilityCalculator based on training set.'
probCalc = ProbabilityCalculator(unigrams, bigrams, delim)
def getFScore(results):
try:
numCorrectParses = len([x for x in results if x[1] == x[2]])
numGuesses = len([x for x in results if x[2]])
numActualParses = len(results)
P = numCorrectParses / float(numGuesses)
R = numCorrectParses / float(numActualParses)
F = (2 * P * R) / (P + R)
except ZeroDivisionError:
F = 0.0
return F
def printResult(result, F, i):
rightAnswerInParses = result[1] in result[3]
correct = result[2] == result[1]
print '%d. %s %s' % (i, result[0],
{True: 'Correct', False: 'Incorrect'}[correct])
try:
print '\tbest guess: %s' % ' '.join(result[2])
except TypeError:
print '\tbest guess: NO GUESS'
print '\tright answer: %s' % ' '.join(result[1])
print '\tright answer in parses: %s' % rightAnswerInParses
print '\tF-score: %f' % F
results = []
resultsConformingToPhonology = []
lookup = {}
i = 1
for word in testSet:
tr = word[0]
tr = removeWordFinalPunctuation(tr) # Remove word-final punctuation
mb = word[1]
mg = word[2]
variants = []
rightAnswer = (mb, mg)
conformsToPhonology = False
phonologizeds = applyFomaPhonology(mb, 'inverse')
if tr in phonologizeds:
conformsToPhonology = True
print '%s phonologizes to %s' % (mb, tr)
else:
print '%s does not phonologize to %s, but to %s' % (
mb, tr, ', '.join(phonologizeds))
try: # If we've already parsed this, no need to do it again
bestGuess, parses = lookup[tr]
except KeyError:
parses = getParsesFromFoma(tr) # Try to get parses
if parses == [u'']: # If none, get variants
variants = getOrthographicVariants(tr)
if tr[0].isupper():
deCapped = tr[0].lower() + tr[1:]
variants = [deCapped] + \
getOrthographicVariants(deCapped) + variants
print '\nVariants:\n\t%s\n' % '\n\t'.join(variants)
for v in variants:
print '\t%s is a variant for %s' % (v, tr)
parses = getParsesFromFoma(v)
if parses != [u'']:
print '\tWe got some parses for %s!:\n%d' % (
v, len(parses))
break
else:
print '\tNo parses for %s :(' % v
if parses == [u'']:
parses = []
bestGuess = None
if parses:
probs = [probCalc.getProbability(p) for p in parses]
parses = zip(parses, probs)
parses.sort(key=lambda x: x[1])
parses.reverse()
parses = [splitBreakFromGloss(x[0]) for x in parses]
bestGuess = parses[0]
# Remember results so we don't needlessly reparse
lookup[tr] = (bestGuess, parses)
result = (tr, rightAnswer, bestGuess, parses)
results.append(result)
if conformsToPhonology:
resultsConformingToPhonology.append(result)
F = getFScore(results)
print 'All Data:'
printResult(result, F, i)
print 'Data where phonology works'
F = getFScore(resultsConformingToPhonology)
printResult(result, F, i)
i += 1
print '\n\n'
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def applyphonology(self, id):
return '<br />'.join(list(set(applyFomaPhonology(id, 'inverse'))))
@h.authenticate
@h.authorize(['administrator', 'contributor'])
@restrict('POST')
def applyphonologytodb(self):
def getVariants(word):
deCapped = word[0].lower() + word[1:]
return list(set([removeExtraneousPunctuation(word),
removeExtraneousPunctuation(deCapped)]))
def getReport(mb, tr, phonologizations):
report = u''
if tr in phonologizations or \
set(getVariants(tr)) & set(phonologizations):
span = '<span style="color: green;">'
report += u'<p>%s%s \u2192 %s</span></p>' % (span, mb, tr)
else:
span = '<span style="color: red;">'
report += u'<p>%s%s \u219B %s</span></p>\n<ul>' % (span, mb, tr)
for ph in phonologizations:
report += u'<li>%s</li>' % ph
report += u'</ul>'
return report
output = u''
# Get forms based on the search/filter provided by the user
values = urllib.unquote_plus(unicode(request.body, 'utf-8'))
values = json.loads(values)
schema = SearchFormForm()
try:
result = schema.to_python(values)
except Invalid:
return 'Unable to validate form data'
form_q = meta.Session.query(model.Form)
form_q = h.filterSearchQuery(result, form_q, 'Form')
if 'limit' in result and result['limit']:
form_q = form_q.limit(int(result['limit']))
forms = form_q.all()
log.debug(len(forms))
correct = incorrect = wordsFound = 0
mb2phonologized = {}
output += u'<p>%d Forms match your criteria</p>' % len(forms)
for form in forms:
tr = form.transcription
mb = form.morphemeBreak
if form.morphemeBreak:
if len(tr.split()) == len(mb.split()):
words = zip(tr.split(), mb.split())
for w in words:
tr = w[0]
mb = w[1]
try:
phonologizations = mb2phonologized[mb]
except KeyError:
phonologizations = list(set(applyFomaPhonology(
mb, 'inverse')))
mb2phonologized[mb] = phonologizations
wordsFound += 1
if tr in phonologizations or \
set(getVariants(tr)) & set(phonologizations):
correct += 1
else:
print '%s is not in %s' % (getVariants(tr), str(phonologizations))
output += getReport(w[1], w[0], phonologizations)
try:
percentCorrect = 100 * correct / float(wordsFound)
except ZeroDivisionError:
percentCorrect = 0.0
output += u'<p>%0.2f%% accuracy.</p>' % percentCorrect
output += u'<p>(%d words found in %d Forms).</p>' % (wordsFound, len(forms))
return output
| gpl-3.0 | 2,214,930,067,422,533,600 | 35.855042 | 94 | 0.5955 | false |
ScegfOd/wesnoth | data/tools/unit_tree/wiki_output.py | 15 | 6187 | import sys
from . import helpers
from .html_output import Translation
def main():
wesnoth = helpers.WesnothList(
options.wesnoth,
options.config_dir,
options.data_dir,
options.transdir)
translated = Translation(options.transdir, options.language)
original = Translation(options.transdir, "en_US")
punits = {}
defines = "NORMAL,ENABLE_ARMAGEDDON_DRAKE,ENABLE_DWARVISH_ARCANISTER," +\
"ENABLE_DWARVISH_RUNESMITH,ENABLE_ANCIENT_LICH,ENABLE_DEATH_KNIGHT," +\
"ENABLE_TROLL_SHAMAN,ENABLE_WOLF_ADVANCEMENT"
sys.stderr.write("Parsing core units...\n")
wesnoth.parser.parse_text("{core/units.cfg}", defines)
punits["mainline"] = wesnoth.parser.get_all(tag = "units")
punits["mainline"] += wesnoth.parser.get_all(tag = "+units")
all_campaigns = {}
sys.stderr.write("Parsing campaigns...\n")
wesnoth.parser.parse_text("{campaigns}", defines)
campaigns = wesnoth.parser.get_all(tag = "campaign")
for campaign in campaigns:
define = campaign.get_text_val("define")
ed = campaign.get_text_val("extra_defines")
if ed: define += "," + ed
name = campaign.get_text_val("name", translation = translated.translate)
sys.stderr.write("Parsing " + name + "...\n")
campaign.name = name
all_campaigns[campaign.get_text_val("id")] = campaign
wesnoth.parser.parse_text("{campaigns}", defines + "," + define)
punits[name] = wesnoth.parser.get_all(tag = "units")
punits[name] += wesnoth.parser.get_all(tag = "+units")
# Get all defined races.
races = {}
for campaign, unitslists in list(punits.items()):
for unitlist in unitslists:
for race in unitlist.get_all(tag = "race"):
races[race.get_text_val("id")] = race
# Go through all units and put them into a dictionary.
all_units = {}
for campaign, unitslists in list(punits.items()):
for unitlist in unitslists:
for unit in unitlist.get_all(tag = "unit_type"):
if unit.get_text_val("do_not_list") in ["yes", "true"]: continue
if unit.get_text_val("hide_help") in ["yes", "true"]: continue
unit.id = unit.get_text_val("id")
unit.campaign = campaign
all_units[unit.id] = unit
unit.children = []
unit.parents = []
def base_val(unit, val, translation = None):
x = unit.get_text_val(val, translation = translation)
if x: return x
for base_unit in unit.get_all(tag = "base_unit"):
base = all_units[base_unit.get_text_val("id")]
x = base_val(base, val, translation = translation)
if x: return x
return None
# Handle unit attributes
for unit in list(all_units.values()):
unit.name = base_val(unit, "name", translation = translated.translate)
unit.orig_name = base_val(unit, "name", translation = original.translate)
try: unit.level = int(base_val(unit, "level"))
except TypeError: unit.level = 0
r = base_val(unit, "race")
try: unit.race = races[r].get_text_val("plural_name", translation = translated.translate)
except KeyError: unit.race = "-"
a = unit.get_text_val("advances_to")
if not a or a == "null": unit.advances_to = []
else: unit.advances_to = [x.strip() for x in a.split(",")]
# Find children and parents of all units.
for unit in list(all_units.values()):
for aid in unit.advances_to:
unit.children.append(all_units[aid])
all_units[aid].parents.append(unit)
for af in unit.get_all(tag = "advancefrom"):
afid = af.get_text_val("unit")
all_units[afid].children.append(unit)
unit.parents.append(all_units[afid])
def race_key(unit):
if unit.campaign == "mainline": return 0, unit.race
        else: return 1, unit.campaign
# Group by race/campaign
units_per_race = {}
for unit in list(all_units.values()):
x = race_key(unit)
if x not in units_per_race: units_per_race[x] = set()
units_per_race[x].add(unit)
# Recursively add all related units of a units to the same race as well.
for race in list(units_per_race.keys()):
while True:
add = []
for unit in units_per_race[race]:
for rel in unit.children + unit.parents:
if rel not in units_per_race[race]:
add.append(rel)
if not add: break
for x in add: units_per_race[race].add(x)
races = sorted(units_per_race.keys())
def w(x): sys.stdout.write(x.encode("utf8") + "\n")
# Now output the units list per race/campaign.
for race in races:
units = units_per_race[race]
w("=== " + race[1] + " ===")
w("{|")
# Find root units.
roots = []
for u in units:
if not u.parents:
roots.append(u)
continue
if not [x for x in u.parents if x.race == u.race]:
roots.append(u)
roots.sort(key = lambda u: u.name)
# Get a grid position for each unit.
def handle_children(y, unit):
unit.y = y
for cunit in unit.children:
y = handle_children(y, cunit)
if not unit.children: y += 1
return y
n = 0
for root in roots:
n = handle_children(n, root)
# Create grid.
grid = []
for j in range(n + 1):
grid.append([None] * 6)
for unit in units:
grid[unit.y][unit.level] = unit
# Output it.
for y in range(n + 1):
for x in range(6):
unit = grid[y][x]
if unit:
w("|'''" + unit.name + "'''<br />" + unit.orig_name)
else:
w("|")
w("|-")
w("|}")
| gpl-2.0 | -7,558,117,287,068,225,000 | 35.181287 | 100 | 0.54065 | false |
vanpact/scipy | scipy/stats/tests/test_rank.py | 28 | 6323 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_equal
from scipy.stats import rankdata, tiecorrect
class TestTieCorrect(TestCase):
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arrays with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
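# General form exercised above (for reference, not a new test): with tie
# groups of sizes T_1..T_k among N ranks, the correction factor is
#   1 - sum_j(T_j**3 - T_j) / (N**3 - N)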
class TestRankData(TestCase):
def test_empty(self):
"""stats.rankdata([]) should return an empty array."""
a = np.array([], dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([], dtype=np.float64))
r = rankdata([])
assert_array_equal(r, np.array([], dtype=np.float64))
def test_one(self):
"""Check stats.rankdata with an array of length 1."""
data = [100]
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
r = rankdata(data)
assert_array_equal(r, np.array([1.0], dtype=np.float64))
def test_basic(self):
"""Basic tests of stats.rankdata."""
data = [100, 10, 50]
expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [40, 10, 30, 10, 50]
expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
data = [20, 20, 20, 10, 10, 10]
expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
a = np.array(data, dtype=int)
r = rankdata(a)
assert_array_equal(r, expected)
r = rankdata(data)
assert_array_equal(r, expected)
# The docstring states explicitly that the argument is flattened.
a2d = a.reshape(2, 3)
r = rankdata(a2d)
assert_array_equal(r, expected)
def test_large_int(self):
data = np.array([2**60, 2**60+1], dtype=np.uint64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, 2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [1.0, 2.0])
data = np.array([2**60, -2**60+1], dtype=np.int64)
r = rankdata(data)
assert_array_equal(r, [2.0, 1.0])
def test_big_tie(self):
for n in [10000, 100000, 1000000]:
data = np.ones(n, dtype=int)
r = rankdata(data)
expected_rank = 0.5 * (n + 1)
assert_array_equal(r, expected_rank * data,
"test failed with n=%d" % n)
_cases = (
# values, method, expected
([], 'average', []),
([], 'min', []),
([], 'max', []),
([], 'dense', []),
([], 'ordinal', []),
#
([100], 'average', [1.0]),
([100], 'min', [1.0]),
([100], 'max', [1.0]),
([100], 'dense', [1.0]),
([100], 'ordinal', [1.0]),
#
([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
#
([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
#
([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
#
([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
#
([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)
def test_cases():
def check_case(values, method, expected):
r = rankdata(values, method=method)
assert_array_equal(r, expected)
for values, method, expected in _cases:
yield check_case, values, method, expected
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -1,244,138,375,132,788,200 | 31.761658 | 77 | 0.504982 | false |
pyNLO/PyNLO | src/pynlo/util/pynlo_ffts.py | 2 | 1292 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 19 12:08:09 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: Gabe-Local
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from numpy import fft
def FFT_t(A,ax=0):
return fft.ifftshift(fft.ifft(fft.fftshift(A,axes=(ax,)),axis=ax),axes=(ax,))
def IFFT_t(A,ax=0):
return fft.ifftshift(fft.fft(fft.fftshift(A,axes=(ax,)),axis=ax),axes=(ax,))
# these last two are defined in laserFOAM but never used
def FFT_x(self,A):
return fft.ifftshift(fft.fft(fft.fftshift(A)))
def IFFT_x(self,A):
return fft.ifftshift(fft.ifft(fft.fftshift(A)))
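# Minimal round-trip sketch (not part of the original module): FFT_t/IFFT_t
# are zero-centred transforms, so applying one after the other should recover
# the input to numerical precision.
def _example_round_trip():
    import numpy as np
    A = np.exp(-np.linspace(-5, 5, 128) ** 2)  # a centred Gaussian pulse
    return np.allclose(IFFT_t(FFT_t(A)), A)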
| gpl-3.0 | -4,912,161,479,920,580,000 | 35.914286 | 81 | 0.709752 | false |
jvassev/dd-agent | jmxfetch.py | 14 | 18408 | # set up logging before importing any other components
if __name__ == '__main__':
from config import initialize_logging # noqa
initialize_logging('jmxfetch')
# std
import glob
import logging
import os
import signal
import sys
import time
# 3rd party
import yaml
# datadog
from config import (
DEFAULT_CHECK_FREQUENCY,
get_confd_path,
get_config,
get_logging_config,
PathNotFound,
)
from util import yLoader
from utils.jmx import JMX_FETCH_JAR_NAME, JMXFiles
from utils.platform import Platform
from utils.subprocess_output import subprocess
log = logging.getLogger('jmxfetch')
JAVA_LOGGING_LEVEL = {
logging.CRITICAL: "FATAL",
logging.DEBUG: "DEBUG",
logging.ERROR: "ERROR",
logging.FATAL: "FATAL",
logging.INFO: "INFO",
logging.WARN: "WARN",
logging.WARNING: "WARN",
}
_JVM_DEFAULT_MAX_MEMORY_ALLOCATION = " -Xmx200m"
_JVM_DEFAULT_INITIAL_MEMORY_ALLOCATION = " -Xms50m"
JMXFETCH_MAIN_CLASS = "org.datadog.jmxfetch.App"
JMX_CHECKS = [
'activemq',
'activemq_58',
'cassandra',
'jmx',
'solr',
'tomcat',
]
JMX_COLLECT_COMMAND = 'collect'
JMX_LIST_COMMANDS = {
'list_everything': 'List every attributes available that has a type supported by JMXFetch',
'list_collected_attributes': 'List attributes that will actually be collected by your current instances configuration',
'list_matching_attributes': 'List attributes that match at least one of your instances configuration',
'list_not_matching_attributes': "List attributes that don't match any of your instances configuration",
'list_limited_attributes': "List attributes that do match one of your instances configuration but that are not being collected because it would exceed the number of metrics that can be collected",
JMX_COLLECT_COMMAND: "Start the collection of metrics based on your current configuration and display them in the console"}
LINK_TO_DOC = "See http://docs.datadoghq.com/integrations/java/ for more information"
class InvalidJMXConfiguration(Exception):
pass
class JMXFetch(object):
"""
Start JMXFetch if any JMX check is configured
"""
def __init__(self, confd_path, agentConfig):
self.confd_path = confd_path
self.agentConfig = agentConfig
self.logging_config = get_logging_config()
self.check_frequency = DEFAULT_CHECK_FREQUENCY
self.jmx_process = None
self.jmx_checks = None
def terminate(self):
self.jmx_process.terminate()
def _handle_sigterm(self, signum, frame):
# Terminate jmx process on SIGTERM signal
log.debug("Caught sigterm. Stopping subprocess.")
self.jmx_process.terminate()
def register_signal_handlers(self):
"""
Enable SIGTERM and SIGINT handlers
"""
try:
# Gracefully exit on sigterm
signal.signal(signal.SIGTERM, self._handle_sigterm)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
except ValueError:
log.exception("Unable to register signal handlers.")
def configure(self, checks_list=None, clean_status_file=True):
"""
Instantiate JMXFetch parameters, clean potential previous run leftovers.
"""
if clean_status_file:
JMXFiles.clean_status_file()
self.jmx_checks, self.invalid_checks, self.java_bin_path, self.java_options, self.tools_jar_path = \
self.get_configuration(self.confd_path, checks_list=checks_list)
def should_run(self):
"""
Should JMXFetch run ?
"""
return self.jmx_checks is not None and self.jmx_checks != []
def run(self, command=None, checks_list=None, reporter=None, redirect_std_streams=False):
"""
Run JMXFetch
redirect_std_streams: if left to False, the stdout and stderr of JMXFetch are streamed
directly to the environment's stdout and stderr and cannot be retrieved via python's
sys.stdout and sys.stderr. Set to True to redirect these streams to python's sys.stdout
and sys.stderr.
"""
if checks_list or self.jmx_checks is None:
# (Re)set/(re)configure JMXFetch parameters when `checks_list` is specified or
# no configuration was found
self.configure(checks_list)
try:
command = command or JMX_COLLECT_COMMAND
if len(self.invalid_checks) > 0:
try:
JMXFiles.write_status_file(self.invalid_checks)
except Exception:
log.exception("Error while writing JMX status file")
if len(self.jmx_checks) > 0:
return self._start(self.java_bin_path, self.java_options, self.jmx_checks,
command, reporter, self.tools_jar_path, redirect_std_streams)
else:
# We're exiting purposefully, so exit with zero (supervisor's expected
# code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
# and thus can exit cleanly.
time.sleep(4)
log.info("No valid JMX integration was found. Exiting ...")
except Exception:
log.exception("Error while initiating JMXFetch")
raise
@classmethod
def get_configuration(cls, confd_path, checks_list=None):
"""
Return a tuple (jmx_checks, invalid_checks, java_bin_path, java_options, tools_jar_path)
jmx_checks: list of yaml files that are jmx checks
(they have the is_jmx flag enabled or they are in JMX_CHECKS)
and that have at least one instance configured
invalid_checks: dictionary whose keys are check names that are JMX checks but
they have a bad configuration. Values of the dictionary are exceptions generated
when checking the configuration
java_bin_path: is the path to the java executable. It was
previously set in the "instance" part of the yaml file of the
jmx check. So we need to parse yaml files to get it.
We assume that this value is alwayws the same for every jmx check
so we can return the first value returned
java_options: is string contains options that will be passed to java_bin_path
We assume that this value is alwayws the same for every jmx check
so we can return the first value returned
tools_jar_path: Path to tools.jar, which is only part of the JDK and that is
required to connect to a local JMX instance using the attach api.
"""
jmx_checks = []
java_bin_path = None
java_options = None
tools_jar_path = None
invalid_checks = {}
for conf in glob.glob(os.path.join(confd_path, '*.yaml')):
filename = os.path.basename(conf)
check_name = filename.split('.')[0]
if os.path.exists(conf):
f = open(conf)
try:
check_config = yaml.load(f.read(), Loader=yLoader)
assert check_config is not None
f.close()
except Exception:
f.close()
log.error("Unable to parse yaml config in %s" % conf)
continue
try:
is_jmx, check_java_bin_path, check_java_options, check_tools_jar_path = \
cls._is_jmx_check(check_config, check_name, checks_list)
if is_jmx:
jmx_checks.append(filename)
if java_bin_path is None and check_java_bin_path is not None:
java_bin_path = check_java_bin_path
if java_options is None and check_java_options is not None:
java_options = check_java_options
if tools_jar_path is None and check_tools_jar_path is not None:
tools_jar_path = check_tools_jar_path
except InvalidJMXConfiguration, e:
log.error("%s check does not have a valid JMX configuration: %s" % (check_name, e))
# Make sure check_name is a string - Fix issues with Windows
check_name = check_name.encode('ascii', 'ignore')
invalid_checks[check_name] = str(e)
return (jmx_checks, invalid_checks, java_bin_path, java_options, tools_jar_path)
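    # Shape of the tuple returned above, with illustrative values only:
    #   (['tomcat.yaml', 'jmx.yaml'],                    # jmx_checks
    #    {'activemq': 'A host must be specified'},       # invalid_checks
    #    '/usr/bin/java',                                # java_bin_path
    #    '-Xmx200m',                                     # java_options
    #    None)                                           # tools_jar_path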
def _start(self, path_to_java, java_run_opts, jmx_checks, command, reporter, tools_jar_path, redirect_std_streams):
statsd_port = self.agentConfig.get('dogstatsd_port', "8125")
if reporter is None:
reporter = "statsd:%s" % str(statsd_port)
log.info("Starting jmxfetch:")
try:
path_to_java = path_to_java or "java"
java_run_opts = java_run_opts or ""
path_to_jmxfetch = self._get_path_to_jmxfetch()
path_to_status_file = JMXFiles.get_status_file_path()
if tools_jar_path is None:
classpath = path_to_jmxfetch
else:
classpath = r"%s:%s" % (tools_jar_path, path_to_jmxfetch)
subprocess_args = [
path_to_java, # Path to the java bin
'-classpath',
classpath,
JMXFETCH_MAIN_CLASS,
'--check_period', str(self.check_frequency * 1000), # Period of the main loop of jmxfetch in ms
'--conf_directory', r"%s" % self.confd_path, # Path of the conf.d directory that will be read by jmxfetch,
'--log_level', JAVA_LOGGING_LEVEL.get(self.logging_config.get("log_level"), "INFO"), # Log Level: Mapping from Python log level to log4j log levels
'--log_location', r"%s" % self.logging_config.get('jmxfetch_log_file'), # Path of the log file
'--reporter', reporter, # Reporter to use
'--status_location', r"%s" % path_to_status_file, # Path to the status file to write
command, # Name of the command
]
if Platform.is_windows():
# Signal handlers are not supported on Windows:
# use a file to trigger JMXFetch exit instead
path_to_exit_file = JMXFiles.get_python_exit_file_path()
subprocess_args.insert(len(subprocess_args) - 1, '--exit_file_location')
subprocess_args.insert(len(subprocess_args) - 1, path_to_exit_file)
subprocess_args.insert(4, '--check')
for check in jmx_checks:
subprocess_args.insert(5, check)
# Specify a maximum memory allocation pool for the JVM
if "Xmx" not in java_run_opts and "XX:MaxHeapSize" not in java_run_opts:
java_run_opts += _JVM_DEFAULT_MAX_MEMORY_ALLOCATION
# Specify the initial memory allocation pool for the JVM
if "Xms" not in java_run_opts and "XX:InitialHeapSize" not in java_run_opts:
java_run_opts += _JVM_DEFAULT_INITIAL_MEMORY_ALLOCATION
for opt in java_run_opts.split():
subprocess_args.insert(1, opt)
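            # The assembled command ends up looking roughly like this (paths
            # and check names are illustrative):
            #   java -Xmx200m -Xms50m -classpath /path/to/jmxfetch.jar \
            #     org.datadog.jmxfetch.App --check tomcat.yaml \
            #     --check_period 15000 --conf_directory /etc/dd-agent/conf.d \
            #     ... collect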
log.info("Running %s" % " ".join(subprocess_args))
# Launch JMXfetch subprocess
jmx_process = subprocess.Popen(
subprocess_args,
close_fds=not redirect_std_streams, # set to True instead of False when the streams are redirected for WIN compatibility
stdout=subprocess.PIPE if redirect_std_streams else None,
stderr=subprocess.PIPE if redirect_std_streams else None
)
self.jmx_process = jmx_process
# Register SIGINT and SIGTERM signal handlers
self.register_signal_handlers()
if redirect_std_streams:
# Wait for JMXFetch to return, and write out the stdout and stderr of JMXFetch to sys.stdout and sys.stderr
out, err = jmx_process.communicate()
sys.stdout.write(out)
sys.stderr.write(err)
else:
# Wait for JMXFetch to return
jmx_process.wait()
return jmx_process.returncode
except OSError:
java_path_msg = "Couldn't launch JMXTerm. Is Java in your PATH ?"
log.exception(java_path_msg)
invalid_checks = {}
for check in jmx_checks:
check_name = check.split('.')[0]
check_name = check_name.encode('ascii', 'ignore')
invalid_checks[check_name] = java_path_msg
JMXFiles.write_status_file(invalid_checks)
raise
except Exception:
log.exception("Couldn't launch JMXFetch")
raise
@staticmethod
def _is_jmx_check(check_config, check_name, checks_list):
init_config = check_config.get('init_config', {}) or {}
java_bin_path = None
java_options = None
is_jmx = False
is_attach_api = False
tools_jar_path = init_config.get("tools_jar_path")
if init_config is None:
init_config = {}
if checks_list:
if check_name in checks_list:
is_jmx = True
elif init_config.get('is_jmx') or check_name in JMX_CHECKS:
is_jmx = True
if is_jmx:
instances = check_config.get('instances', [])
if type(instances) != list or len(instances) == 0:
raise InvalidJMXConfiguration("You need to have at least one instance "
"defined in the YAML file for this check")
for inst in instances:
if type(inst) != dict:
raise InvalidJMXConfiguration("Each instance should be"
" a dictionary. %s" % LINK_TO_DOC)
host = inst.get('host', None)
port = inst.get('port', None)
conf = inst.get('conf', init_config.get('conf', None))
tools_jar_path = inst.get('tools_jar_path')
# Support for attach api using a process name regex
proc_regex = inst.get('process_name_regex')
if proc_regex is not None:
is_attach_api = True
else:
if host is None:
raise InvalidJMXConfiguration("A host must be specified")
if port is None or type(port) != int:
raise InvalidJMXConfiguration("A numeric port must be specified")
if conf is None:
log.warning("%s doesn't have a 'conf' section. Only basic JVM metrics"
" will be collected. %s" % (inst, LINK_TO_DOC))
else:
if type(conf) != list or len(conf) == 0:
raise InvalidJMXConfiguration("'conf' section should be a list"
" of configurations %s" % LINK_TO_DOC)
for config in conf:
include = config.get('include', None)
if include is None:
raise InvalidJMXConfiguration("Each configuration must have an"
" 'include' section. %s" % LINK_TO_DOC)
if type(include) != dict:
raise InvalidJMXConfiguration("'include' section must"
" be a dictionary %s" % LINK_TO_DOC)
if java_bin_path is None:
if init_config and init_config.get('java_bin_path'):
# We get the java bin path from the yaml file
# for backward compatibility purposes
java_bin_path = init_config.get('java_bin_path')
else:
for instance in instances:
if instance and instance.get('java_bin_path'):
java_bin_path = instance.get('java_bin_path')
if java_options is None:
if init_config and init_config.get('java_options'):
java_options = init_config.get('java_options')
else:
for instance in instances:
if instance and instance.get('java_options'):
java_options = instance.get('java_options')
if is_attach_api:
if tools_jar_path is None:
for instance in instances:
if instance and instance.get("tools_jar_path"):
tools_jar_path = instance.get("tools_jar_path")
if tools_jar_path is None:
raise InvalidJMXConfiguration("You must specify the path to tools.jar"
" in your JDK.")
elif not os.path.isfile(tools_jar_path):
raise InvalidJMXConfiguration("Unable to find tools.jar at %s" % tools_jar_path)
else:
tools_jar_path = None
return is_jmx, java_bin_path, java_options, tools_jar_path
def _get_path_to_jmxfetch(self):
if not Platform.is_windows():
return os.path.realpath(os.path.join(os.path.abspath(__file__), "..", "checks",
"libs", JMX_FETCH_JAR_NAME))
return os.path.realpath(os.path.join(os.path.abspath(__file__), "..", "..",
"jmxfetch", JMX_FETCH_JAR_NAME))
def init(config_path=None):
agentConfig = get_config(parse_args=False, cfg_path=config_path)
try:
confd_path = get_confd_path()
except PathNotFound, e:
log.error("No conf.d folder found at '%s' or in the directory where"
"the Agent is currently deployed.\n" % e.args[0])
return confd_path, agentConfig
def main(config_path=None):
""" JMXFetch main entry point """
confd_path, agentConfig = init(config_path)
jmx = JMXFetch(confd_path, agentConfig)
return jmx.run()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -8,383,728,626,071,692,000 | 40.931663 | 200 | 0.570621 | false |
marosi/SocialDesktopClient | plugins/buddycloud/3rdparty/swift/3rdParty/SCons/scons-local-2.0.0.final.0/SCons/Tool/suncc.py | 34 | 1978 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 5023 2010/06/14 22:05:46 scons"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
return env.Detect('CC')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | -8,685,766,431,366,180,000 | 33.103448 | 95 | 0.729525 | false |
Acehaidrey/incubator-airflow | airflow/contrib/secrets/hashicorp_vault.py | 8 | 1152 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.hashicorp.secrets.vault`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.hashicorp.secrets.vault import VaultBackend # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.hashicorp.secrets.vault`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | -3,671,080,068,118,884,400 | 37.4 | 89 | 0.769097 | false |
swprojects/Advanced-Action-Scheduler | advancedactionscheduler/base.py | 1 | 18977 | # -*- coding: utf-8 -*
"""
@author Simon Wu <[email protected]>
Copyright (c) 2018 by Simon Wu <Advanced Action Scheduler>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
import platform
import wx
import wx.dataview
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin, CheckListCtrlMixin
PLATFORM = platform.system()
class TreeListCtrl(wx.dataview.TreeListCtrl):
def __init__(self, parent, style=None, **kwargs):
"""
Tree data format
index = "n,n,n, ..."
...where the first n is a top-level parent. Subsequent n
are children of the left n
"""
self.parent = parent
if not style:
style = wx.dataview.TL_CHECKBOX
wx.dataview.TreeListCtrl.__init__(self, parent, style=style, **kwargs)
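    # Index format illustration (hypothetical tree): "0" is the first
    # top-level item, "0,1" is the second child of that item, and "0,1,0" is
    # the first grandchild, so "n,n,n" reads left-to-right from root to leaf.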
def AppendItemToRoot(self, value):
item = self.AppendItem(self.GetRootItem(), value)
return item
def DeleteAllItems(self):
if PLATFORM == "Windows":
super(TreeListCtrl, self).DeleteAllItems()
else:
# workaround for GTK
item = self.GetFirstItem()
while item.IsOk():
self.DeleteItem(item)
item = self.GetFirstItem()
def GetItemDepth(self, item):
""" backwards """
depth = 0
while self.GetItemParent(item).IsOk():
depth += 1
item = self.GetItemParent(item)
return depth - 1
def GetIndexByOrder(self, item):
""" iterate through all items until we reach the item """
if not item.IsOk():
return None
n = 0
itemIter = self.GetFirstItem()
if not itemIter.IsOk():
return None
while (itemIter != item):
itemIter = self.GetNextItem(itemIter)
n += 1
if not itemIter.IsOk():
break
return n
def GetItemIndex(self, item):
""" hacky way of getting the item index """
selection = item
item = self.GetFirstItem()
lastItem = item
if not item.IsOk():
return -1
row = 0
idx = "0"
root = self.GetRootItem()
items = []
while item.IsOk():
# first top level item
if lastItem == item:
idx = "0"
row += 1
# top level item
elif self.GetItemParent(item) == root:
idx = str(row)
row += 1
# first child of previous item
elif item == self.GetFirstChild(lastItem):
# print(self.GetItemText(item, 0))
idx += ",0"
# sibling of previous item
elif item == self.GetNextSibling(lastItem):
idx = idx.split(",")
next = int(idx[-1]) + 1 # ...and increment last
idx = idx[0:-1]
idx.append(str(next))
idx = ",".join(idx)
# sibling of parent
elif item == self.GetNextSibling(self.GetItemParent(lastItem)):
idx = idx.split(",")[:-1]
# increment last element
next = int(idx[-1]) + 1
del idx[-1]
idx.append(str(next))
idx = ",".join(idx)
else:
for itm, itmIdx in items:
if self.GetNextSibling(itm) != item:
continue
idx = itmIdx.split(",")
next = int(idx[-1]) + 1 # ...and increment last
idx = idx[0:-1]
idx.append(str(next))
idx = ",".join(idx)
break
if item == selection:
break
lastItem = item
items.append((item, idx))
item = self.GetNextItem(item)
return idx
    def GetLastChild(self, item):
        child = self.GetFirstChild(item)
        while child.IsOk() and self.GetNextSibling(child).IsOk():
            child = self.GetNextSibling(child)
        return child
def GetLastSibling(self, item):
while self.GetNextSibling(item).IsOk():
item = self.GetNextSibling(item)
return item
def GetPreviousSibling(self, item):
parent = self.GetItemParent(item)
sib = self.GetNextItem(parent)
if item == sib:
return -1
while self.GetNextSibling(sib) != item:
sib = self.GetNextSibling(sib)
return sib
def GetSubTree(self, item):
""" return the sub tree of schedule item """
# we stop when item is a sibling
selectedDepth = self.GetItemDepth(item)
data = []
columnCount = self.GetColumnCount()
depth = selectedDepth
idx = "0"
firstItem = item
while item.IsOk():
d = self.GetItemDepth(item)
# have we reached sibling
if d <= selectedDepth and firstItem != item:
break
# selected item is first item
if d == selectedDepth:
pass
# depth unchanged, item is the next sibling of previous item
elif d == depth:
idx = idx.split(",")
next = int(idx[-1]) + 1 # ...and increment last
idx = idx[0:-1]
idx.append(str(next))
idx = ",".join(idx)
# a child of previous item
elif d > depth:
idx += ",0"
# sibling of parent
elif d < depth:
idx = idx.split(",")[:depth]
# increment last element
next = int(idx[-1]) + 1
del idx[-1]
idx.append(str(next))
idx = ",".join(idx)
depth = d
idxData = {}
idxData["columns"] = {str(c): self.GetItemText(item, c) for c in range(columnCount)}
idxData["checked"] = self.GetCheckedState(item)
idxData["expanded"] = self.IsExpanded(item)
idxData["selected"] = self.IsSelected(item)
data.append((idx, idxData))
item = self.GetNextItem(item)
return data
def GetTopLevelParent(self, item):
while self.GetItemParent(item) != self.GetRootItem():
item = self.GetItemParent(item)
return item
def GetTree(self):
data = []
item = self.GetFirstItem()
items = []
lastItem = item
if not item.IsOk():
return data
columnCount = self.GetColumnCount()
row = 0
idx = "0"
root = self.GetRootItem()
while item.IsOk():
# first top level item
if lastItem == item:
idx = "0"
row += 1
# top level item
elif self.GetItemParent(item) == root:
idx = str(row)
row += 1
# first child of previous item
elif item == self.GetFirstChild(lastItem):
# print(self.GetItemText(item, 0))
idx += ",0"
# sibling of previous item
elif item == self.GetNextSibling(lastItem):
idx = idx.split(",")
next = int(idx[-1]) + 1 # ...and increment last
idx = idx[0:-1]
idx.append(str(next))
idx = ",".join(idx)
# sibling of parent
elif item == self.GetNextSibling(self.GetItemParent(lastItem)):
idx = idx.split(",")[:-1]
# increment last element
next = int(idx[-1]) + 1
del idx[-1]
idx.append(str(next))
idx = ",".join(idx)
else:
for itm, itmIdx in items:
if self.GetNextSibling(itm) != item:
continue
idx = itmIdx.split(",")
next = int(idx[-1]) + 1 # ...and increment last
idx = idx[0:-1]
idx.append(str(next))
idx = ",".join(idx)
break
idxData = {}
idxData["columns"] = {str(c): self.GetItemText(item, c) for c in range(columnCount)}
idxData["checked"] = self.GetCheckedState(item)
idxData["expanded"] = self.IsExpanded(item)
idxData["selected"] = self.IsSelected(item)
data.append((idx, idxData))
items.append((item, idx))
lastItem = item
item = self.GetNextItem(item)
return data
def AppendSubTree(self, parent, data):
""" append sub tree to item """
if not data:
return
items = {}
expandedItems = []
for idx, idxData in data:
columns = idxData["columns"]
if "," not in idx:
item = self.AppendItem(parent, columns["0"])
else:
parentIdx = idx.split(",")[:-1]
parentIdx = ",".join(parentIdx)
item = self.AppendItem(items[parentIdx], columns["0"])
for c in range(1, len(columns)):
try:
                    self.SetItemText(item, str(c), columns[str(c)])
except Exception as e:
logging.debug(e)
items[idx] = item
checked = idxData["checked"]
if checked == 1:
self.CheckItem(item)
else:
self.UncheckItem(item)
selected = idxData["selected"]
if selected is True:
self.Select(item)
expanded = idxData["expanded"]
if expanded is True:
expandedItems.append(item)
items[idx] = item
for item in expandedItems:
self.Expand(item)
def InsertSubTree(self, previous, data):
""" insert sub tree after previous item """
if not data:
return
items = {}
expandedItems = []
for idx, idxData in data:
columns = idxData["columns"]
if "," not in idx:
try:
# this ensures top level items pasted in correct order
previous = items[str(int(idx) - 1)]
except Exception as e:
logging.debug(e)
if previous == -1:
item = self.PrependItem(self.GetItemParent(self.GetSelection()), columns["0"])
elif not previous.IsOk():
if self.GetSelection().IsOk():
item = self.AppendItem(self.GetSelection(), columns["0"])
else:
item = self.AppendItem(self.GetRootItem(), columns["0"])
else:
item = self.InsertItem(self.GetItemParent(previous), previous, columns["0"])
else:
parent = idx.split(",")[:-1]
parent = ",".join(parent)
item = self.AppendItem(items[parent], columns["0"])
for c in range(1, len(columns)):
try:
                    self.SetItemText(item, str(c), columns[str(c)])
except Exception as e:
logging.debug(e)
items[idx] = item
checked = idxData["checked"]
if checked == 1:
self.CheckItem(item)
else:
self.UncheckItem(item)
selected = idxData["selected"]
if selected is True:
self.Select(item)
expanded = idxData["expanded"]
if expanded is True:
expandedItems.append(item)
items[idx] = item
for item in expandedItems:
self.Expand(item)
def IsTopLevel(self, item):
return self.GetItemParent(item) == self.GetRootItem()
def SelectItemByOrder(self, n):
""" step through until we reach the nth item """
# print("SelectItemByOrder", n)
if n is None:
return
assert n >= 0, "Must be greater/equal to 0"
count = 0
itemIter = self.GetFirstItem()
while n != count:
itemIter = self.GetNextItem(itemIter)
count += 1
self.Select(itemIter)
def SetTree(self, data):
""" set the treelist """
self.DeleteAllItems()
if not data:
return
items = {}
expandedItems = []
for idx, idxData in data:
columns = idxData["columns"]
if "," not in idx:
item = self.AppendItemToRoot(columns["0"])
else:
parent = idx.split(",")[:-1]
parent = ",".join(parent)
item = self.AppendItem(items[parent], columns["0"])
for c in range(1, len(columns)):
try:
                    self.SetItemText(item, str(c), columns[str(c)])
except Exception as e:
logging.debug(e)
items[idx] = item
checked = idxData["checked"]
if checked == 1:
self.CheckItem(item)
else:
self.UncheckItem(item)
selected = idxData["selected"]
if selected is True:
self.Select(item)
expanded = idxData["expanded"]
if expanded is True:
logging.debug(expanded)
expandedItems.append(item)
items[idx] = item
for item in expandedItems:
self.Expand(item)
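# Minimal usage sketch for TreeListCtrl (illustrative only; assumes a running
# wx.App and an existing parent window, and the column layout is hypothetical):
#
#   tree = TreeListCtrl(parent)
#   tree.AppendColumn("Action")
#   schedule = tree.AppendItemToRoot("Schedule 1")
#   tree.AppendItem(schedule, "CloseWindow")
#   data = tree.GetTree()   # [(index, {"columns": ..., "checked": ..., ...}), ...]
#   tree.SetTree(data)      # rebuilds the tree from the serialized form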
class ToolTip(wx.ToolTip):
def __init__(self, tip):
wx.ToolTip.__init__(self, tip)
self.SetDelay(50)
self.SetAutoPop(20000)
self.Enable(True)
# self.SetDelay
class CheckList(wx.ListCtrl, ListCtrlAutoWidthMixin, CheckListCtrlMixin):
def __init__(self, parent, style=wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.LC_SINGLE_SEL):
wx.ListCtrl.__init__(self, parent, style=style)
# ListCtrlAutoWidthMixin.__init__(self)
CheckListCtrlMixin.__init__(self)
def DeselectAll(self):
first = self.GetFirstSelected()
if first == -1:
return
self.Select(first, on=0)
item = first
while self.GetNextSelected(item) != -1:
item = self.GetNextSelected(item)
            self.Select(item, on=0)
class BaseList(wx.ListCtrl, ListCtrlAutoWidthMixin):
def __init__(self, parent, style=wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES|wx.LC_SINGLE_SEL):
wx.ListCtrl.__init__(self, parent, style=style)
ListCtrlAutoWidthMixin.__init__(self)
def DeleteSelected(self):
selected = self.GetFirstSelected()
while selected != -1:
self.DeleteItem(selected)
selected = self.GetFirstSelected()
def DeselectAll(self):
first = self.GetFirstSelected()
if first == -1:
return
self.Select(first, on=0)
item = first
while self.GetNextSelected(item) != -1:
item = self.GetNextSelected(item)
            self.Select(item, on=0)
def MoveSelectedItemsDown(self):
items = []
selected = self.GetFirstSelected()
while selected != -1:
items.append(selected)
selected = self.GetNextSelected(selected)
if items == [self.GetItemCount() - 1]:
return
rowBlockStart = None # consecutive selected rows
for row in items:
if row + 1 in items:
if rowBlockStart is None:
rowBlockStart = row
continue
if row == self.GetItemCount() - 1:
return
itemText = []
if rowBlockStart:
for col in range(0, self.GetColumnCount()):
itemText.append(self.GetItemText(row + 1, col))
self.DeleteItem(row + 1)
new = self.InsertItem(rowBlockStart, itemText[0])
rowBlockStart = None
else:
for col in range(0, self.GetColumnCount()):
itemText.append(self.GetItemText(row, col))
self.DeleteItem(row)
new = self.InsertItem(row + 1, itemText[0])
self.Select(new)
for col in range(0, self.GetColumnCount()):
self.SetItem(new, col, itemText[col])
def MoveSelectedItemsUp(self):
items = []
selected = self.GetFirstSelected()
while selected != -1:
items.append(selected)
selected = self.GetNextSelected(selected)
if items == [0]:
return
limit = 0
for row in items:
if row == limit:
limit += 1
continue
text = self.GetItemText(row)
self.DeleteItem(row)
new = self.InsertItem(row - 1, text)
self.Select(new)
class InputDialog(wx.Dialog):
def __init__(self, parent, title="", caption=""):
wx.Dialog.__init__(self,
parent,
style=wx.DEFAULT_DIALOG_STYLE)
self.SetTitle(title)
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
# hsizer = wx.BoxSizer(wx.HORIZONTAL)
caption = wx.StaticText(panel, label=caption)
# hsizer.Add(caption, 0, wx.ALL|wx.EXPAND)
self.input = wx.TextCtrl(panel, value="")
hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
hsizer2.AddStretchSpacer()
for label, id in [("Ok", wx.ID_OK),
("Cancel", wx.ID_CANCEL)]:
btn = wx.Button(panel, id=id, label=label)
btn.Bind(wx.EVT_BUTTON, self.OnButton)
hsizer2.Add(btn, 0, wx.ALL, 2)
sizer.AddSpacer(20)
sizer.Add(caption, 0, wx.ALL|wx.EXPAND, 5)
sizer.Add(self.input, 0, wx.ALL|wx.EXPAND, 5)
sizer.AddStretchSpacer()
sizer.Add(wx.StaticLine(panel), 0, wx.ALL|wx.EXPAND, 2)
sizer.Add(hsizer2, 0, wx.ALL|wx.ALIGN_RIGHT, 5)
panel.SetSizer(sizer)
        # key events binding
        self.input.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
    def OnKeyUp(self, event):
        keycode = event.GetKeyCode()
        if keycode == wx.WXK_ESCAPE:
            self.EndModal(wx.ID_CANCEL)
        elif keycode == wx.WXK_RETURN:
            self.EndModal(wx.ID_OK)
event.Skip()
def GetValue(self):
return self.input.GetValue()
def OnButton(self, event):
e = event.GetEventObject()
id = e.GetId()
self.EndModal(id)
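# Illustrative usage of InputDialog (requires an existing wx.App and parent
# window; the strings are placeholders):
#
#   dlg = InputDialog(parent, title="Rename", caption="Enter a new name:")
#   if dlg.ShowModal() == wx.ID_OK:
#       new_name = dlg.GetValue()
#   dlg.Destroy()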
| gpl-2.0 | -121,047,236,581,522,830 | 29.170111 | 98 | 0.502292 | false |
dims/cinder | cinder/tests/unit/api/contrib/test_volume_host_attribute.py | 2 | 5323 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import webob
from cinder import context
from cinder import db
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_volume
from cinder import volume
def fake_db_volume_get(*args, **kwargs):
return {
'id': 'fake',
'host': 'host001',
'status': 'available',
'size': 5,
'availability_zone': 'somewhere',
'created_at': timeutils.utcnow(),
'display_name': 'anothervolume',
'display_description': 'Just another volume!',
'volume_type_id': None,
'snapshot_id': None,
'project_id': 'fake',
'migration_status': None,
'_name_id': 'fake2',
'attach_status': 'detached',
}
def fake_volume_api_get(*args, **kwargs):
ctx = context.RequestContext('admin', 'fake', True)
db_volume = fake_db_volume_get()
return fake_volume.fake_volume_obj(ctx, **db_volume)
def fake_volume_get_all(*args, **kwargs):
return objects.VolumeList(objects=[fake_volume_api_get()])
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeHostAttributeTest(test.TestCase):
def setUp(self):
super(VolumeHostAttributeTest, self).setUp()
self.stubs.Set(volume.api.API, 'get', fake_volume_api_get)
self.stubs.Set(volume.api.API, 'get_all', fake_volume_get_all)
self.stubs.Set(db, 'volume_get', fake_db_volume_get)
self.UUID = uuid.uuid4()
def test_get_volume_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = jsonutils.loads(res.body)['volume']
self.assertEqual('host001', vol['os-vol-host-attr:host'])
def test_get_volume_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = jsonutils.loads(res.body)['volume']
self.assertNotIn('os-vol-host-attr:host', vol)
def test_list_detail_volumes_allowed(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = jsonutils.loads(res.body)['volumes']
self.assertEqual('host001', vol[0]['os-vol-host-attr:host'])
def test_list_detail_volumes_unallowed(self):
ctx = context.RequestContext('non-admin', 'fake', False)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = jsonutils.loads(res.body)['volumes']
self.assertNotIn('os-vol-host-attr:host', vol[0])
def test_list_simple_volumes_no_host(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes')
req.method = 'GET'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = jsonutils.loads(res.body)['volumes']
self.assertNotIn('os-vol-host-attr:host', vol[0])
def test_get_volume_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/%s' % self.UUID)
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = etree.XML(res.body)
host_key = ('{http://docs.openstack.org/volume/ext/'
'volume_host_attribute/api/v2}host')
self.assertEqual('host001', vol.get(host_key))
def test_list_volumes_detail_xml(self):
ctx = context.RequestContext('admin', 'fake', True)
req = webob.Request.blank('/v2/fake/volumes/detail')
req.method = 'GET'
req.accept = 'application/xml'
req.environ['cinder.context'] = ctx
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
host_key = ('{http://docs.openstack.org/volume/ext/'
'volume_host_attribute/api/v2}host')
self.assertEqual('host001', vol.get(host_key))
| apache-2.0 | 1,668,633,759,151,440,100 | 35.710345 | 77 | 0.629156 | false |
scality/cinder | cinder/zonemanager/utils.py | 23 | 4376 | # (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Utility functions related to the Zone Manager.
"""
import logging
from oslo_log import log
from cinder.i18n import _LI, _LW
from cinder.volume import configuration
from cinder.volume import manager
from cinder.zonemanager import fc_san_lookup_service
from cinder.zonemanager import fc_zone_manager
LOG = log.getLogger(__name__)
LOG.logger.setLevel(logging.DEBUG)
def create_zone_manager():
"""If zoning is enabled, build the Zone Manager."""
config = configuration.Configuration(manager.volume_manager_opts)
LOG.debug("Zoning mode: %s", config.safe_get('zoning_mode'))
if config.safe_get('zoning_mode') == 'fabric':
LOG.debug("FC Zone Manager enabled.")
zm = fc_zone_manager.ZoneManager()
LOG.info(_LI("Using FC Zone Manager %(zm_version)s,"
" Driver %(drv_name)s %(drv_version)s."),
{'zm_version': zm.get_version(),
'drv_name': zm.driver.__class__.__name__,
'drv_version': zm.driver.get_version()})
return zm
else:
LOG.debug("FC Zone Manager not enabled in cinder.conf.")
return None
def create_lookup_service():
config = configuration.Configuration(manager.volume_manager_opts)
LOG.debug("Zoning mode: %s", config.safe_get('zoning_mode'))
if config.safe_get('zoning_mode') == 'fabric':
LOG.debug("FC Lookup Service enabled.")
lookup = fc_san_lookup_service.FCSanLookupService()
LOG.info(_LI("Using FC lookup service %s"), lookup.lookup_service)
return lookup
else:
LOG.debug("FC Lookup Service not enabled in cinder.conf.")
return None
def get_formatted_wwn(wwn_str):
"""Utility API that formats WWN to insert ':'."""
if (len(wwn_str) != 16):
return wwn_str.lower()
else:
return (':'.join([wwn_str[i:i + 2]
for i in range(0, len(wwn_str), 2)])).lower()
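# Example (illustrative): get_formatted_wwn('21000024ff45d1c2') returns
# '21:00:00:24:ff:45:d1:c2'; inputs that are not 16 characters long are only
# lower-cased.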
def AddFCZone(initialize_connection):
"""Decorator to add a FC Zone."""
def decorator(self, *args, **kwargs):
conn_info = initialize_connection(self, *args, **kwargs)
if not conn_info:
LOG.warning(_LW("Driver didn't return connection info, "
"can't add zone."))
return None
vol_type = conn_info.get('driver_volume_type', None)
if vol_type == 'fibre_channel':
if 'initiator_target_map' in conn_info['data']:
init_target_map = conn_info['data']['initiator_target_map']
zm = create_zone_manager()
if zm:
LOG.debug("Add FC Zone for mapping '%s'.",
init_target_map)
zm.add_connection(init_target_map)
return conn_info
return decorator
def RemoveFCZone(terminate_connection):
"""Decorator for FC drivers to remove zone."""
def decorator(self, *args, **kwargs):
conn_info = terminate_connection(self, *args, **kwargs)
if not conn_info:
LOG.warning(_LW("Driver didn't return connection info from "
"terminate_connection call."))
return None
vol_type = conn_info.get('driver_volume_type', None)
if vol_type == 'fibre_channel':
if 'initiator_target_map' in conn_info['data']:
init_target_map = conn_info['data']['initiator_target_map']
zm = create_zone_manager()
if zm:
LOG.debug("Remove FC Zone for mapping '%s'.",
init_target_map)
zm.delete_connection(init_target_map)
return conn_info
return decorator
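# Illustrative application of the decorators on a Fibre Channel driver (the
# class, method bodies and WWPN values below are hypothetical):
#
#   class ExampleFCDriver(object):
#       @AddFCZone
#       def initialize_connection(self, volume, connector):
#           return {'driver_volume_type': 'fibre_channel',
#                   'data': {'initiator_target_map': {'wwpn1': ['wwpn2']}}}
#
#       @RemoveFCZone
#       def terminate_connection(self, volume, connector, **kwargs):
#           return {'driver_volume_type': 'fibre_channel',
#                   'data': {'initiator_target_map': {'wwpn1': ['wwpn2']}}}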
| apache-2.0 | 3,327,257,172,252,760,600 | 35.165289 | 78 | 0.603519 | false |
shootstar/novatest | nova/api/openstack/compute/limits.py | 3 | 16428 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module dedicated functions/classes dealing with rate limiting requests.
This module handles rate limiting at a per-user level, so it should not be used
to prevent intentional Denial of Service attacks, as we can assume a DOS can
easily come through multiple user accounts. DOS protection should be done at a
different layer. Instead this module should be used to protect against
unintentional user actions. With that in mind the limits set here should be
high enough as to not rate-limit any intentional actions.
To find good rate-limit values, check how long requests are taking (see logs)
in your environment to assess your capabilities and multiply out to get
figures.
NOTE: As the rate-limiting here is done in memory, this only works per
process (each process will have its own rate limiting counter).
"""
import collections
import copy
import httplib
import math
import re
import time
import webob.dec
import webob.exc
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import quota
from nova import utils
from nova import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('limits', selector='limits')
rates = xmlutil.SubTemplateElement(root, 'rates')
rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
rate.set('uri', 'uri')
rate.set('regex', 'regex')
limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
limit.set('value', 'value')
limit.set('verb', 'verb')
limit.set('remaining', 'remaining')
limit.set('unit', 'unit')
limit.set('next-available', 'next-available')
absolute = xmlutil.SubTemplateElement(root, 'absolute',
selector='absolute')
limit = xmlutil.SubTemplateElement(absolute, 'limit',
selector=xmlutil.get_items)
limit.set('name', 0)
limit.set('value', 1)
return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(object):
"""Controller for accessing limits in the OpenStack API."""
@wsgi.serializers(xml=LimitsTemplate)
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['nova.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("nova.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
def create(self, req, body):
"""Create a new limit."""
raise webob.exc.HTTPNotImplemented()
def delete(self, req, id):
"""Delete the limit."""
raise webob.exc.HTTPNotImplemented()
def detail(self, req):
"""Return limit details."""
raise webob.exc.HTTPNotImplemented()
def show(self, req, id):
"""Show limit information."""
raise webob.exc.HTTPNotImplemented()
def update(self, req, id, body):
"""Update existing limit."""
raise webob.exc.HTTPNotImplemented()
def _get_view_builder(self, req):
return limits_views.ViewBuilder()
def create_resource():
return wsgi.Resource(LimitsController())
class Limit(object):
"""
Stores information about a limit for HTTP requests.
"""
UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()])
def __init__(self, verb, uri, regex, value, unit):
"""
Initialize a new `Limit`.
@param verb: HTTP verb (POST, PUT, etc.)
@param uri: Human-readable URI
@param regex: Regular expression format for this limit
@param value: Integer number of requests which can be made
@param unit: Unit of measure for the value parameter
"""
self.verb = verb
self.uri = uri
self.regex = regex
self.value = int(value)
self.unit = unit
self.unit_string = self.display_unit().lower()
self.remaining = int(value)
if value <= 0:
raise ValueError("Limit value must be > 0")
self.last_request = None
self.next_request = None
self.water_level = 0
self.capacity = self.unit
self.request_value = float(self.capacity) / float(self.value)
msg = _("Only %(value)s %(verb)s request(s) can be "
"made to %(uri)s every %(unit_string)s.")
self.error_message = msg % self.__dict__
def __call__(self, verb, url):
"""
Represents a call to this limit from a relevant request.
@param verb: string http verb (POST, GET, etc.)
@param url: string URL
"""
if self.verb != verb or not re.match(self.regex, url):
return
now = self._get_time()
if self.last_request is None:
self.last_request = now
leak_value = now - self.last_request
self.water_level -= leak_value
self.water_level = max(self.water_level, 0)
self.water_level += self.request_value
difference = self.water_level - self.capacity
self.last_request = now
if difference > 0:
self.water_level -= self.request_value
self.next_request = now + difference
return difference
cap = self.capacity
water = self.water_level
val = self.value
self.remaining = math.floor(((cap - water) / cap) * val)
self.next_request = now
def _get_time(self):
"""Retrieve the current time. Broken out for testability."""
return time.time()
def display_unit(self):
"""Display the string name of the unit."""
return self.UNITS.get(self.unit, "UNKNOWN")
def display(self):
"""Return a useful representation of this class."""
return {
"verb": self.verb,
"URI": self.uri,
"regex": self.regex,
"value": self.value,
"remaining": int(self.remaining),
"unit": self.display_unit(),
"resetTime": int(self.next_request or self._get_time()),
}
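# Worked example of the leaky-bucket bookkeeping in Limit.__call__ (numbers
# invented): with value=10 and unit=MINUTE (60), capacity is 60 and each
# request adds 60/10 = 6 units of "water", so at most 10 requests fit in a
# minute before __call__ starts returning a positive delay.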
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
DEFAULT_LIMITS = [
Limit("POST", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
Limit("POST", "*/servers", "^/servers", 120, utils.TIME_UNITS['MINUTE']),
Limit("PUT", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
Limit("GET", "*changes-since*", ".*changes-since.*", 120,
utils.TIME_UNITS['MINUTE']),
Limit("DELETE", "*", ".*", 120, utils.TIME_UNITS['MINUTE']),
Limit("GET", "*/os-fping", "^/os-fping", 12, utils.TIME_UNITS['MINUTE']),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
"""
Rate-limits requests passing through this middleware. All limit information
is stored in memory for this implementation.
"""
def __init__(self, application, limits=None, limiter=None, **kwargs):
"""
Initialize new `RateLimitingMiddleware`, which wraps the given WSGI
application and sets up the given limits.
@param application: WSGI application to wrap
@param limits: String describing limits
@param limiter: String identifying class for representing limits
Other parameters are passed to the constructor for the limiter.
"""
base_wsgi.Middleware.__init__(self, application)
# Select the limiter class
if limiter is None:
limiter = Limiter
else:
limiter = importutils.import_class(limiter)
# Parse the limits, if any are provided
if limits is not None:
limits = limiter.parse_limits(limits)
self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""
Represents a single call through this middleware. We should record the
request if we have a limit relevant to it. If no limit is relevant to
the request, ignore it.
If the request should be rate limited, return a fault telling the user
they are over the limit and need to retry later.
"""
verb = req.method
url = req.url
context = req.environ.get("nova.context")
if context:
username = context.user_id
else:
username = None
delay, error = self._limiter.check_for_delay(verb, url, username)
if delay:
msg = _("This request was rate-limited.")
retry = time.time() + delay
return wsgi.OverLimitFault(msg, error, retry)
req.environ["nova.limits"] = self._limiter.get_limits(username)
return self.application
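# Illustrative wiring (assumes `app` is an existing WSGI application; the limit
# string is made up):
#
#   limited_app = RateLimitingMiddleware(app, limits='(GET, *, .*, 60, MINUTE)')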
class Limiter(object):
"""
Rate-limit checking class which handles limits in memory.
"""
def __init__(self, limits, **kwargs):
"""
Initialize the new `Limiter`.
@param limits: List of `Limit` objects
"""
self.limits = copy.deepcopy(limits)
self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
# Pick up any per-user limit information
for key, value in kwargs.items():
if key.startswith('user:'):
username = key[5:]
self.levels[username] = self.parse_limits(value)
def get_limits(self, username=None):
"""
Return the limits for a given user.
"""
return [limit.display() for limit in self.levels[username]]
def check_for_delay(self, verb, url, username=None):
"""
        Check the given verb/url/username triplet for a limit.
@return: Tuple of delay (in seconds) and error message (or None, None)
"""
delays = []
for limit in self.levels[username]:
delay = limit(verb, url)
if delay:
delays.append((delay, limit.error_message))
if delays:
delays.sort()
return delays[0]
return None, None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor. We
# put this in the class so that subclasses can override the
# default limit parsing.
@staticmethod
def parse_limits(limits):
"""
Convert a string into a list of Limit instances. This
implementation expects a semicolon-separated sequence of
parenthesized groups, where each group contains a
comma-separated sequence consisting of HTTP method,
user-readable URI, a URI reg-exp, an integer number of
requests which can be made, and a unit of measure. Valid
values for the latter are "SECOND", "MINUTE", "HOUR", and
"DAY".
@return: List of Limit instances.
"""
# Handle empty limit strings
limits = limits.strip()
if not limits:
return []
# Split up the limits by semicolon
result = []
for group in limits.split(';'):
group = group.strip()
if group[:1] != '(' or group[-1:] != ')':
raise ValueError("Limit rules must be surrounded by "
"parentheses")
group = group[1:-1]
# Extract the Limit arguments
args = [a.strip() for a in group.split(',')]
if len(args) != 5:
raise ValueError("Limit rules must contain the following "
"arguments: verb, uri, regex, value, unit")
# Pull out the arguments
verb, uri, regex, value, unit = args
# Upper-case the verb
verb = verb.upper()
# Convert value--raises ValueError if it's not integer
value = int(value)
# Convert unit
unit = unit.upper()
if unit not in utils.TIME_UNITS:
raise ValueError("Invalid units specified")
unit = utils.TIME_UNITS[unit]
# Build a limit
result.append(Limit(verb, uri, regex, value, unit))
return result
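# Illustrative limit string accepted by Limiter.parse_limits (values invented):
#
#   "(POST, */servers, ^/servers, 10, MINUTE);(GET, *, .*, 100, HOUR)"
#
# which parses into two Limit instances.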
class WsgiLimiter(object):
"""
Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
To use, POST ``/<username>`` with JSON data such as::
{
"verb" : GET,
"path" : "/servers"
}
and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
header containing the number of seconds to wait before the action would
succeed.
"""
def __init__(self, limits=None):
"""
Initialize the new `WsgiLimiter`.
@param limits: List of `Limit` objects
"""
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, request):
"""
Handles a call to this application. Returns 204 if the request is
acceptable to the limiter, else a 403 is returned with a relevant
header indicating when the request *will* succeed.
"""
if request.method != "POST":
raise webob.exc.HTTPMethodNotAllowed()
try:
info = dict(jsonutils.loads(request.body))
except ValueError:
raise webob.exc.HTTPBadRequest()
username = request.path_info_pop()
verb = info.get("verb")
path = info.get("path")
delay, error = self._limiter.check_for_delay(verb, path, username)
if delay:
headers = {"X-Wait-Seconds": "%.2f" % delay}
return webob.exc.HTTPForbidden(headers=headers, explanation=error)
else:
return webob.exc.HTTPNoContent()
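# Illustrative exchange with a deployed WsgiLimiter (user name is made up):
#
#   POST /alice HTTP/1.1
#   Content-Type: application/json
#
#   {"verb": "GET", "path": "/servers"}
#
# A 204 No Content reply means the request is allowed; a 403 reply carries an
# X-Wait-Seconds header with the delay to wait before retrying.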
class WsgiLimiterProxy(object):
"""
Rate-limit requests based on answers from a remote source.
"""
def __init__(self, limiter_address):
"""
Initialize the new `WsgiLimiterProxy`.
@param limiter_address: IP/port combination of where to request limit
"""
self.limiter_address = limiter_address
def check_for_delay(self, verb, path, username=None):
body = jsonutils.dumps({"verb": verb, "path": path})
headers = {"Content-Type": "application/json"}
conn = httplib.HTTPConnection(self.limiter_address)
if username:
conn.request("POST", "/%s" % (username), body, headers)
else:
conn.request("POST", "/", body, headers)
resp = conn.getresponse()
        if 200 <= resp.status < 300:
return None, None
return resp.getheader("X-Wait-Seconds"), resp.read() or None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor.
# This implementation returns an empty list, since all limit
# decisions are made by a remote server.
@staticmethod
def parse_limits(limits):
"""
Ignore a limits string--simply doesn't apply for the limit
proxy.
@return: Empty list.
"""
return []
| apache-2.0 | 8,665,861,442,626,651,000 | 31.7251 | 79 | 0.607439 | false |
MechCoder/scikit-learn | sklearn/feature_selection/univariate_selection.py | 19 | 26302 | """Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
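# Minimal usage sketch for f_classif (synthetic data, illustrative only):
#
#   import numpy as np
#   X = np.random.RandomState(0).rand(20, 3)
#   y = np.array([0] * 10 + [1] * 10)
#   F, pval = f_classif(X, y)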
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
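# Illustrative use of chi2 (X must be non-negative):
#
#   from sklearn.datasets import load_iris
#   X, y = load_iris(return_X_y=True)
#   scores, pvalues = chi2(X, y)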
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
    This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape(n_samples).
The data matrix
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
# compute centered values
# note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
# compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) -
n_samples * X_means ** 2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
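# Illustrative usage of SelectKBest (k is arbitrary; X, y are any suitable
# classification data):
#
#   X_new = SelectKBest(chi2, k=2).fit_transform(X, y)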
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
    mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information between features and the target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
    mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features *
np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues). For modes 'percentile' or 'kbest' it can return
a single array scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
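# Illustrative usage of GenericUnivariateSelect (mode and param are arbitrary):
#
#   transformer = GenericUnivariateSelect(chi2, mode='k_best', param=2)
#   X_new = transformer.fit_transform(X, y)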
| bsd-3-clause | -6,488,583,189,400,307,000 | 33.883289 | 80 | 0.629952 | false |
PlutoniumHeart/ITK | Wrapping/Generators/Python/Tests/CastImageFilter.py | 19 | 1145 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the CastImageFilter
#
import itk
from sys import argv
itk.auto_progress(2)
dim = 2
IType = itk.Image[itk.F, dim]
OIType = itk.Image[itk.UC, dim]
reader = itk.ImageFileReader[IType].New(FileName=argv[1])
filter = itk.CastImageFilter[IType, OIType].New(reader)
writer = itk.ImageFileWriter[OIType].New(filter, FileName=argv[2])
writer.Update()
| apache-2.0 | 4,363,121,764,346,589,700 | 31.714286 | 77 | 0.636681 | false |
lshain-android-source/external-chromium_org | build/android/buildbot/bb_host_steps.py | 23 | 4602 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import bb_utils
import bb_annotations
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import constants
SLAVE_SCRIPTS_DIR = os.path.join(bb_utils.BB_BUILD_DIR, 'scripts', 'slave')
VALID_HOST_TESTS = set(['check_webview_licenses', 'findbugs'])
EXPERIMENTAL_TARGETS = ['android_experimental']
# Short hand for RunCmd which is used extensively in this file.
RunCmd = bb_utils.RunCmd
def SrcPath(*path):
return os.path.join(constants.DIR_SOURCE_ROOT, *path)
def CheckWebViewLicenses(_):
bb_annotations.PrintNamedStep('check_licenses')
RunCmd([SrcPath('android_webview', 'tools', 'webview_licenses.py'), 'scan'],
warning_code=1)
def RunHooks(build_type):
RunCmd([SrcPath('build', 'landmines.py')])
build_path = SrcPath('out', build_type)
landmine_path = os.path.join(build_path, '.landmines_triggered')
clobber_env = os.environ.get('BUILDBOT_CLOBBER')
if clobber_env or os.path.isfile(landmine_path):
bb_annotations.PrintNamedStep('Clobber')
if not clobber_env:
print 'Clobbering due to triggered landmines:'
with open(landmine_path) as f:
print f.read()
RunCmd(['rm', '-rf', build_path])
bb_annotations.PrintNamedStep('runhooks')
RunCmd(['gclient', 'runhooks'], halt_on_failure=True)
def Compile(options):
RunHooks(options.target)
cmd = [os.path.join(SLAVE_SCRIPTS_DIR, 'compile.py'),
'--build-tool=ninja',
'--compiler=goma',
'--target=%s' % options.target,
'--goma-dir=%s' % bb_utils.GOMA_DIR]
build_targets = options.build_targets.split(',')
bb_annotations.PrintNamedStep('compile')
for build_target in build_targets:
RunCmd(cmd + ['--build-args=%s' % build_target], halt_on_failure=True)
if options.experimental:
for compile_target in EXPERIMENTAL_TARGETS:
bb_annotations.PrintNamedStep('Experimental Compile %s' % compile_target)
RunCmd(cmd + ['--build-args=%s' % compile_target], flunk_on_failure=False)
def ZipBuild(options):
bb_annotations.PrintNamedStep('zip_build')
RunCmd([
os.path.join(SLAVE_SCRIPTS_DIR, 'zip_build.py'),
'--src-dir', constants.DIR_SOURCE_ROOT,
'--build-dir', SrcPath('out'),
'--exclude-files', 'lib.target,gen,android_webview,jingle_unittests']
+ bb_utils.EncodeProperties(options))
def ExtractBuild(options):
bb_annotations.PrintNamedStep('extract_build')
RunCmd(
[os.path.join(SLAVE_SCRIPTS_DIR, 'extract_build.py'),
'--build-dir', SrcPath('build'), '--build-output-dir',
SrcPath('out')] + bb_utils.EncodeProperties(options),
warning_code=1)
def FindBugs(options):
bb_annotations.PrintNamedStep('findbugs')
build_type = []
if options.target == 'Release':
build_type = ['--release-build']
RunCmd([SrcPath('build', 'android', 'findbugs_diff.py')] + build_type)
RunCmd([SrcPath(
'tools', 'android', 'findbugs_plugin', 'test',
'run_findbugs_plugin_tests.py')] + build_type)
def BisectPerfRegression(_):
bb_annotations.PrintNamedStep('Bisect Perf Regression')
RunCmd([SrcPath('tools', 'prepare-bisect-perf-regression.py'),
'-w', os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)])
RunCmd([SrcPath('tools', 'run-bisect-perf-regression.py'),
'-w', os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)])
def GetHostStepCmds():
return [
('compile', Compile),
('extract_build', ExtractBuild),
('check_webview_licenses', CheckWebViewLicenses),
('bisect_perf_regression', BisectPerfRegression),
('findbugs', FindBugs),
('zip_build', ZipBuild)
]
def GetHostStepsOptParser():
parser = bb_utils.GetParser()
parser.add_option('--steps', help='Comma separated list of host tests.')
parser.add_option('--build-targets', default='All',
help='Comma separated list of build targets.')
parser.add_option('--experimental', action='store_true',
help='Indicate whether to compile experimental targets.')
return parser
def main(argv):
parser = GetHostStepsOptParser()
options, args = parser.parse_args(argv[1:])
if args:
return sys.exit('Unused args %s' % args)
setattr(options, 'target', options.factory_properties.get('target', 'Debug'))
if options.steps:
bb_utils.RunSteps(options.steps.split(','), GetHostStepCmds(), options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | 6,647,442,044,972,894,000 | 31.638298 | 80 | 0.674272 | false |
StuartLittlefair/astropy | astropy/io/misc/asdf/tags/table/table.py | 8 | 4380 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from asdf.tags.core.ndarray import NDArrayType
from astropy import table
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class TableType:
"""
This class defines to_tree and from_tree methods that are used by both the
AstropyTableType and the AsdfTableType defined below. The behavior is
differentiated by the ``_compat`` class attribute. When ``_compat==True``,
the behavior will conform to the table schema defined by the ASDF Standard.
Otherwise, the behavior will conform to the custom table schema defined by
Astropy.
"""
_compat = False
@classmethod
def from_tree(cls, node, ctx):
# This is getting meta, guys
meta = node.get('meta', {})
# This enables us to support files that use the table definition from
# the ASDF Standard, rather than the custom one that Astropy defines.
if cls._compat:
return table.Table(node['columns'], meta=meta)
if node.get('qtable', False):
t = table.QTable(meta=node.get('meta', {}))
else:
t = table.Table(meta=node.get('meta', {}))
for name, col in zip(node['colnames'], node['columns']):
t[name] = col
return t
@classmethod
def to_tree(cls, data, ctx):
columns = [data[name] for name in data.colnames]
node = dict(columns=columns)
# Files that use the table definition from the ASDF Standard (instead
# of the one defined by Astropy) will not contain these fields
if not cls._compat:
node['colnames'] = data.colnames
node['qtable'] = isinstance(data, table.QTable)
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
try:
NDArrayType.assert_equal(np.array(old), np.array(new))
except (AttributeError, TypeError, ValueError):
for col0, col1 in zip(old, new):
try:
NDArrayType.assert_equal(np.array(col0), np.array(col1))
except (AttributeError, TypeError, ValueError):
assert col0 == col1
class AstropyTableType(TableType, AstropyType):
"""
This tag class reads and writes tables that conform to the custom schema
that is defined by Astropy (in contrast to the one that is defined by the
ASDF Standard). The primary reason for differentiating is to enable the
support of Astropy mixin columns, which are not supported by the ASDF
Standard.
"""
name = 'table/table'
types = ['astropy.table.Table']
requires = ['astropy']
class AsdfTableType(TableType, AstropyAsdfType):
"""
This tag class allows Astropy to read (and write) ASDF files that use the
table definition that is provided by the ASDF Standard (instead of the
custom one defined by Astropy). This is important to maintain for
cross-compatibility.
"""
name = 'core/table'
types = ['astropy.table.Table']
requires = ['astropy']
_compat = True
class ColumnType(AstropyAsdfType):
name = 'core/column'
types = ['astropy.table.Column', 'astropy.table.MaskedColumn']
requires = ['astropy']
handle_dynamic_subclasses = True
@classmethod
def from_tree(cls, node, ctx):
data = node['data']
name = node['name']
description = node.get('description')
unit = node.get('unit')
meta = node.get('meta', None)
return table.Column(
data=data._make_array(), name=name, description=description,
unit=unit, meta=meta)
@classmethod
def to_tree(cls, data, ctx):
node = {
'data': data.data,
'name': data.name
}
if data.description:
node['description'] = data.description
if data.unit:
node['unit'] = data.unit
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
assert old.description == new.description
assert old.unit == new.unit
NDArrayType.assert_equal(np.array(old), np.array(new))
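# Illustrative round trip (editor's addition, not part of astropy): a minimal
# sketch of writing an astropy Table into an ASDF file, which exercises the
# AstropyTableType tag defined above. It assumes the optional `asdf` package
# is installed; the table contents and file name are arbitrary.
if __name__ == "__main__":
    import asdf
    from astropy.table import Table
    demo_table = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    demo_file = asdf.AsdfFile({'table': demo_table})
    demo_file.write_to('table_example.asdf')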
| bsd-3-clause | -4,751,110,466,474,300,000 | 31.205882 | 79 | 0.619178 | false |
evalsocket/tensorflow | object_detection/object_detection/utils/np_box_list_ops_test.py | 26 | 16886 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_ops."""
import numpy as np
import tensorflow as tf
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist1 = np_box_list.BoxList(boxes1)
self.boxlist2 = np_box_list.BoxList(boxes2)
def test_area(self):
areas = np_box_list_ops.area(self.boxlist1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
boxlist1 = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
def test_scale(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
expected_boxlist_scaled = np_box_list.BoxList(
np.array(
[[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32))
self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
def test_clip_to_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_clipped = np_box_list_ops.clip_to_window(boxlist,
[0.0, 0.0, 1.0, 1.0])
expected_boxlist_clipped = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.0, 0.0, 0.7, 1.0]],
dtype=np.float32))
self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
def test_prune_outside_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
expected_boxlist_pruned = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
def test_concatenate(self):
boxlist1 = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
boxlists = [boxlist1, boxlist2]
boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
boxlist_concatenated_expected = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
dtype=np.float32))
self.assertAllClose(boxlist_concatenated_expected.get(),
boxlist_concatenated.get())
def test_change_coordinate_frame(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist_coord = np_box_list_ops.change_coordinate_frame(
boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32))
expected_boxlist_coord = np_box_list.BoxList(
np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32))
self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
def test_filter_scores_greater_than(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist.add_field('scores', np.array([0.8, 0.2], np.float32))
boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
expected_boxlist_greater = np_box_list.BoxList(
np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.7, 0.9], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_gather_with_out_of_range_indices(self):
indices = np.array([3, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_with_invalid_multidimensional_indices(self):
indices = np.array([[0, 1], [1, 2]], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_without_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices)
expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
self.assertAllClose(expected_scores, subboxlist.get_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
def test_gather_with_invalid_field_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, 'labels')
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, ['objectness'])
def test_gather_with_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices, ['labels'])
self.assertFalse(subboxlist.has_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
class SortByFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.9, 0.4], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_with_invalid_field(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'objectness')
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'labels')
def test_with_invalid_sorting_order(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'scores', 'Descending')
def test_with_descending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, 'scores')
expected_boxes = np.array([[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
def test_with_ascending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(
self.boxlist, 'scores', np_box_list_ops.SortOrder.ASCEND)
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]],
dtype=float)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 1. # No NMS
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_ten_indentical_boxes(self):
boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array(10 * [0.8]))
iou_threshold = .5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = .4
expected_boxes = np.array([[0, 0, 20, 100],
[200, 200, 210, 300],],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .5
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .8
expected_boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80],
[200, 200, 210, 300], [200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32))
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
boxlist.add_field('scores', scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = boxlist_clean.get_field('scores')
classes_clean = boxlist_clean.get_field('classes')
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == '__main__':
tf.test.main()
| bsd-2-clause | 6,379,117,208,551,208,000 | 39.78744 | 80 | 0.551818 | false |
marklescroart/bvp | bvp/Classes/render_options.py | 1 | 37159 | """
TODO:
Might be nice to store specific presets of RenderOptions, for specific final
choices for rendering stimuli for a given experiment.
FOR NOW, this is not going to be a mapped class...
"""
# Imports
import os
import sys
import math as bnp
from .. import utils as bvpu
from ..options import config
try:
import bpy
import mathutils as bmu
is_blender = True
except ImportError:
is_blender = False
RENDER_DIR = os.path.expanduser(config.get('path', 'render_dir'))
bvp_basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '../'))
render_file = os.path.abspath(os.path.join(bvp_basedir, 'Scripts',
'BlenderRender.py'))
# The "type" input for compositor node creation has been arbitrarily changed
# numerous times throughout Blender API development. This is EXTREMELY
# IRRITATING. Nonetheless, the format may change again, so I've collected
# all the node type IDs here and use the variables below
if sys.platform == 'darwin':
# print('Mac computer node names!')
# RLayerNodeX = 'R_LAYERS'
# CompositorNodeX = 'COMPOSITE'
# OutputFileNodeX = 'OUTPUT_FILE'
# ViewerNodeX = 'VIEWER'
# SepRGBANodeX = 'CompositorNodeSepRGBA'
# CombRGBANodeX = 'CompositorNodeCombRGBA'
# IDmaskNodeX = 'ID_MASK'
# MathNodeX = 'CompositorNodeMath'
RLayerNode = 'CompositorNodeRLayers'
CompositorNode = 'CompositorNodeComposite'
OutputFileNode = 'CompositorNodeOutputFile'
ViewerNode = 'CompositorNodeViewer'
SepRGBANode = 'CompositorNodeSepRGBA'
CombRGBANode = 'CompositorNodeCombRGBA'
IDmaskNode = 'CompositorNodeIDMask'
MathNode = 'CompositorNodeMath'
else:
RLayerNode = 'CompositorNodeRLayers'
CompositorNode = 'CompositorNodeComposite'
OutputFileNode = 'CompositorNodeOutputFile'
ViewerNode = 'CompositorNodeViewer'
SepRGBANode = 'CompositorNodeSepRGBA'
CombRGBANode = 'CompositorNodeCombRGBA'
IDmaskNode = 'CompositorNodeIDMask'
MathNode = 'CompositorNodeMath'
class RenderOptions(object):
"""Class for storing render options for a scene."""
def __init__(self, blender_params=None, bvp_params=None):
"""Initialize rendering options for scenes in BVP framework.
Parameters
----------
blender_params : dict
directly updates any blender scene.render params for the scene
bvp_params : dict
establishes more complex BVP options (whether to initialize node setup
for different render passes [some work, others don't], base path for
render directory, and which file to use to render BVP scenes).
fields : defaults are as follows:
### --- Basic file / rendering stuff --- ###
Type : 'FirstFrame', # other options : All, FirstAndLastFrame, 'every4th'
RenderFile : os.path.join(bvp.__path__[0], 'Scripts', 'BlenderRender.py') # File to call to render scenes
BasePath : '/auto/k1/mark/Desktop/BlenderTemp/', # Base render path (TO DO: replace with bvp config settings)
### --- Render passes --- ###
Image : True # Render RGB(A) images or not (alpha layer or not determined by blender_params )
ObjectMasks : False # Render masks for all BVP objects in scene (working)
Zdepth : False # Render Z depth pass
Normals : False, # Not yet implemented
### --- Work-in-progress render passes (NOT working as of 2015.05) --- ###
Contours : False, #Freestyle, yet to be implemented
Motion : False, # Render motion (ground truth optical flow) pass
Voxels : False # Create voxelized version of each scene
Axes : False, # based on N.Cornea code, for now
Clay : False, # All shape, no material / texture (over-ride w/ plain [clay] material) lighting??
Notes
-----
        RenderOptions does not directly modify a scene's file path; it only provides the base path (parent directory) for all rendering.
        Scene's "apply_opts" function should be the only function to modify bpy.context.scene.filepath (!!) (2012.03.12)
"""
self.use_freestyle = False # May need some clever if statement here - checking on version of blender
if self.use_freestyle:
pass
# Freestyle settings. Not used yet as of 2016.07
#FSlineThickness = 3.0
#FSlineCol = (0.0, 0.0, 0.0)
self.use_antialiasing = True
self.antialiasing_samples = '8'
self.use_edge_enhance = False
self.use_raytrace = True
self.use_compositing = True
self.use_textures = True
self.use_sss = False
self.use_shadows = True
self.use_envmaps = False
# File size
self.engine='BLENDER_RENDER'
self.resolution_x = 512
self.resolution_y = 512
self.resolution_percentage = 100
self.tile_x = 64 # More?
self.tile_y = 64 # More?
# Fields not in bpy.data.scene.render class:
# Image settings: File format and color mode
self.image_settings = dict(color_mode='RGBA',
file_format='PNG')
self.DefaultLayerOpts = {
'layers':tuple([True]*20),
'use_zmask':False,
'use_all_z':False,
'use_solid':True, # Necessary for almost everything
'use_halo':False,
'use_ztransp':False,
'use_sky':False,
'use_edge_enhance':False,
'use_strand':False,
'use_freestyle':False,
'use_pass_combined':False,
'use_pass_z':False,
'use_pass_vector':False,
'use_pass_normal':False,
'use_pass_uv':False,
'use_pass_mist':False,
'use_pass_object_index':False,
'use_pass_color':False,
'use_pass_diffuse':False,
'use_pass_specular':False,
'use_pass_shadow':False,
'use_pass_emit':False,
'use_pass_ambient_occlusion':False,
'use_pass_environment':False,
'use_pass_indirect':False,
'use_pass_reflection':False,
'use_pass_refraction':False,
}
self.BVPopts = {
# BVP specific rendering options
"Image":True,
"Voxels":False, # Not yet implemented reliably.
"ObjectMasks":False,
"Motion":False, # Not yet implemented reliably. Buggy AF
"Zdepth":False,
"Contours":False, #Freestyle, yet to be implemented
"Axes":False, # based on N.Cornea code, for now - still unfinished
"Normals":False,
"Clay":False, # Not yet implemented - All shape, no material / texture (over-ride w/ plain [clay] material) lighting??
"Type":'FirstFrame', # other options: "All", "FirstAndLastFrame", 'every4th'
"RenderFile":render_file,
"BasePath":RENDER_DIR,
}
# Disallow updates that add fields
self.__dict__ = bvpu.basics.fixedKeyDict(self.__dict__)
# Update defaults w/ inputs
if not bvp_params is None:
# TO DO: Change this variable name. Big change, tho.
self.BVPopts.update(bvp_params)
if not blender_params is None:
# TO DO: Clean this shit up. Sloppy organization.
if 'DefaultLayerOpts' in blender_params.keys():
DefaultLayerOpts = blender_params.pop('DefaultLayerOpts')
self.DefaultLayerOpts.update(DefaultLayerOpts)
self.__dict__.update(blender_params)
def __repr__(self):
S = 'Class "RenderOptions":\n'+self.__dict__.__repr__()
return S
def apply_opts(self, scn=None):
if scn is None:
# Get current scene if input not supplied
scn = bpy.context.scene
# Backwards compatibility:
if not 'Voxels' in self.BVPopts:
self.BVPopts['Voxels'] = False
if not 'Motion' in self.BVPopts:
self.BVPopts['Motion'] = False
scn.use_nodes = True
# Set only first layer to be active
if bpy.app.version < (2, 80, 0):
scn.layers = [True]+[False]*19
else:
# NEED TO FIGURE OUT WHAT TO DO HERE
pass
# Get all non-function attributes
ToSet = [x for x in self.__dict__.keys() if not hasattr(self.__dict__[x], '__call__') and not x in ['BVPopts', 'DefaultLayerOpts', 'image_settings']]
for s in ToSet:
try:
setattr(scn.render, s, self.__dict__[s]) # define __getattr__ or whatever so self[s] works
except:
print('Unable to set attribute %s!'%s)
# Set image settings:
scn.render.image_settings.file_format = self.image_settings['file_format']
scn.render.image_settings.color_mode = self.image_settings['color_mode']
# Re-set all nodes and render layers
for n in scn.node_tree.nodes:
scn.node_tree.nodes.remove(n)
old_render_layers = bpy.context.scene.render.layers.keys()
bpy.ops.scene.render_layer_add()
for ii in range(len(old_render_layers)):
bpy.context.scene.render.layers.active_index = 0
bpy.ops.scene.render_layer_remove()
# Rename newly-added layer (with default properties) to default name
# (Doing it in this order assures Blender won't make it, e.g., RenderLayer.001
# if a layer named RenderLayer already exists)
bpy.context.scene.render.layers[0].name = 'RenderLayer'
# Add basic node setup:
RL = scn.node_tree.nodes.new(type=RLayerNode)
compositor_output = scn.node_tree.nodes.new(type=CompositorNode)
scn.node_tree.links.new(RL.outputs['Image'], compositor_output.inputs['Image'])
scn.node_tree.links.new(RL.outputs['Alpha'], compositor_output.inputs['Alpha'])
# Decide whether we're only rendering one type of output:
single_output = sum([self.BVPopts['Image'], self.BVPopts['ObjectMasks'], self.BVPopts['Zdepth'],
self.BVPopts['Contours'], self.BVPopts['Axes'], self.BVPopts['Normals']])==1
if bpy.app.version < (2, 80, 0):
update = scn.update
else:
update = bpy.context.view_layer.update
# Add compositor nodes for optional outputs:
if self.BVPopts['Voxels']:
self.SetUpVoxelization()
update()
return # Special case! no other node-based options can be applied!
if self.BVPopts['ObjectMasks']:
self.add_object_masks(single_output=single_output)
if self.BVPopts['Motion']:
self.add_motion(single_output=single_output)
if self.BVPopts['Zdepth']:
self.add_depth(single_output=single_output)
if self.BVPopts['Contours']:
raise NotImplementedError('Not ready yet!')
if self.BVPopts['Axes']:
raise NotImplementedError('Not ready yet!')
if self.BVPopts['Normals']:
self.add_normals(single_output=single_output)
if self.BVPopts['Clay']:
raise NotImplementedError('Not ready yet!')
#self.AddClayLayerNodes(Is_RenderOnlyClay=single_output)
if not self.BVPopts['Image']:
# Switch all properties from one of the file output nodes to the composite output
# Grab a node
aa = [N for N in scn.node_tree.nodes if N.type=='OUTPUT_FILE']
print([a.type for a in scn.node_tree.nodes])
output = aa[0]
# Find input to this node
Lnk = [L for L in scn.node_tree.links if L.to_node == output][0]
Input = Lnk.from_socket
# Remove all input to composite node:
NodeComposite = [N for N in scn.node_tree.nodes if N.type=='COMPOSITE'][0]
L = [L for L in scn.node_tree.links if L.to_node==NodeComposite]
for ll in L:
scn.node_tree.links.remove(ll)
# Make link from input to file output to composite output:
scn.node_tree.links.new(Input, NodeComposite.inputs['Image'])
# Update Scene info to reflect node info:
scn.render.filepath = output.base_path+output.file_slots[0].path
scn.render.image_settings.file_format = output.format.file_format
# Get rid of old file output
scn.node_tree.nodes.remove(output)
# Get rid of render layer that renders image:
RL = scn.render.layers['RenderLayer']
scn.render.layers.remove(RL)
# Turn off raytracing??
update()
"""
Notes on nodes: The following functions add various types of compositor nodes to a scene in Blender.
These allow output of other image files that represent other "meta-information" (e.g. Z depth,
normals, etc) that is separate from the pixel-by-pixel color / luminance information in standard images.
To add nodes: NewNode = nt.nodes.new(type=NodeType)
See top of code for list of node types used.
"""
def add_object_masks(self, scn=None, single_output=False):
"""Adds compositor nodes to render out object masks.
Parameters
----------
scn : bpy.data.scene | None (default=None)
Leave as default (None) for now. Placeholder for future code updates.
single_output : bool
Whether to render ONLY masks.
Notes
-----
The current implementation relies on objects being linked into Blender scene (without creating proxies), or being
mesh objects. Older versions of the code filtered objects by whether or not they had any parent object. The old
way may be useful, if object insertion methods change.
IMPORTANT:
If scenes are created with many objects off-camera, this code will create a mask for EVERY off-scene object.
These masks will not be in the scene, but blender will render an image (an all-black image) for each and
every one of them.
"""
if not scn:
scn = bpy.context.scene
scn.use_nodes = True
scn.render.use_compositing = True
########################################################################
### --- First: Allocate all objects' pass indices (and groups??) --- ###
########################################################################
DisallowedNames = ['BG_', 'CamTar', 'Shadow_'] # Also constraint objects...
Ob = [o for o in bpy.context.scene.objects if not any([x in o.name for x in DisallowedNames])]
PassCt = 1
to_skip = []
for o in Ob:
if o.name in to_skip:
continue
# Check for dupli groups:
if o.type=='EMPTY':
if o.dupli_group:
o.pass_index = PassCt
for po in o.dupli_group.objects:
po.pass_index = PassCt
bvpu.blender.set_layers(o, [0, PassCt])
PassCt +=1
# Check for mesh objects:
elif o.type in ('MESH', 'CURVE'):
print('assigning pass index %d to %s'%(PassCt, o.name))
o.pass_index = PassCt
bvpu.blender.set_layers(o, [0, PassCt])
if bpy.app.version < (2, 80, 0):
ug = o.users_group
else:
ug = o.users_collection
if len(ug) > 0:
for sibling in ug[0].objects:
to_skip.append(sibling.name)
print('assigning pass index %d to %s'%(PassCt, sibling.name))
print(to_skip)
sibling.pass_index = PassCt
bvpu.blender.set_layers(sibling, [0, PassCt])
PassCt += 1
# Other types of objects??
#####################################################################
### --- Second: Set up render layers: --- ###
#####################################################################
RL = scn.render.layers.keys()
if not 'ObjectMasks1' in RL:
for iob in range(PassCt-1):
ob_layer = scn.render.layers.new('ObjectMasks%d'%(iob + 1))
for k, v in self.DefaultLayerOpts.items():
ob_layer.__setattr__(k, v)
layers = [False for x in range(20)];
layers[iob+1] = True
ob_layer.layers = tuple(layers)
ob_layer.use_ztransp = True # Necessary for object indices to work for transparent materials
ob_layer.use_pass_object_index = True # This one only
else:
raise Exception('ObjectMasks layers already exist!')
########################################################################
### --- Third: Set up compositor nodes: --- ###
########################################################################
nt = scn.node_tree
# Object index nodes (pass_index=100 is for skies!)
pass_idx = [o.pass_index for o in scn.objects if o.pass_index < 100]
max_pi = max(pass_idx)
print('Found %d pass indices'%(max_pi))
for iob in range(max_pi):
node_rl = nt.nodes.new(type=RLayerNode)
node_rl.layer = 'ObjectMasks%d'%(iob + 1)
NewVwNode = nt.nodes.new(ViewerNode)
NewIDNode = nt.nodes.new(IDmaskNode)
NewIDOut = nt.nodes.new(OutputFileNode)
VwNm = 'ID Mask %d View'%(iob+1)
NewVwNode.name = VwNm
IDNm = 'ID Mask %d'%(iob+1)
NewIDNode.name = IDNm
NewIDNode.index = iob+1
# Link nodes
nt.links.new(node_rl.outputs['IndexOB'], NewIDNode.inputs['ID value'])
nt.links.new(NewIDNode.outputs['Alpha'], NewIDOut.inputs[0])
nt.links.new(NewIDNode.outputs['Alpha'], NewVwNode.inputs['Image'])
NewIDOut.format.file_format = 'PNG'
NewIDOut.base_path = scn.render.filepath.replace('/Scenes/', '/Masks/')
endCut = NewIDOut.base_path.index('Masks/')+len('Masks/')
# Set unique name per frame
NewIDOut.file_slots[0].path = NewIDOut.base_path[endCut:]+'_m%02d'%(iob+1)
NewIDOut.name = 'Object %d'%(iob)
# Set base path
NewIDOut.base_path = NewIDOut.base_path[:endCut]
# Set location with NewIdNode.location = ((x, y))
nPerRow = 8.
Loc = bmu.Vector((bnp.modf(iob/nPerRow)[0]*nPerRow, -bnp.modf(iob/nPerRow)[1]))
Offset = 250.
Loc = Loc*Offset - bmu.Vector((nPerRow/2. * Offset - 300., 100.)) # hard-coded to be below RL node
NewIDNode.location = Loc
NewVwNode.location = Loc - bmu.Vector((0., 100))
NewIDOut.location = Loc - bmu.Vector((-150., 100))
def add_depth(self, scn=None, single_output=False):
"""Adds compositor nodes to render out Z buffer
"""
if not scn:
scn = bpy.context.scene
scn.use_nodes = True
scn.render.use_compositing = True
#####################################################################
### --- Set up render layers: --- ###
#####################################################################
RL = scn.render.layers.keys()
if not 'Zdepth' in RL:
#bpy.ops.scene.render_layer_add() # Seems like there should be a "name" input argument, but not yet so we have to be hacky about this:
#ob_layer = [x for x in scn.render.layers.keys() if not x in RL]
#ob_layer = scn.render.layers[ob_layer[0]]
ob_layer = scn.render.layers.new('Zdepth')
for k in self.DefaultLayerOpts.keys():
ob_layer.__setattr__(k, self.DefaultLayerOpts[k])
#ob_layer.name = 'Zdepth'
#RL.append('Zdepth')
ob_layer.use_ztransp = True # Necessary for z depth to work for transparent materials ?
ob_layer.use_pass_z = True # Principal interest
ob_layer.use_pass_object_index = True # for masking out depth of sky dome
else:
raise Exception('Zdepth layer already exists!')
########################################################################
### --- Set up compositor nodes: --- ###
########################################################################
nt = scn.node_tree
# Get all node names (keys)
node_rl = nt.nodes.new(type=RLayerNode)
node_rl.layer = 'Zdepth'
# Zero out all depth info from the sky dome (the sky doesn't have any depth!)
NodeSky = nt.nodes.new(IDmaskNode)
NodeSky.use_antialiasing = False #No AA for z depth! doesn't work to combine non-AA node w/ AA node!
NodeSky.index = 100
nt.links.new(node_rl.outputs['IndexOB'], NodeSky.inputs['ID value'])
NodeInv = nt.nodes.new(MathNode)
NodeInv.operation = 'SUBTRACT'
# Invert (ID) alpha layer, so sky values are zero, objects/bg are 1
NodeInv.inputs[0].default_value = 1.0
nt.links.new(NodeSky.outputs[0], NodeInv.inputs[1])
# Mask out sky by multiplying with inverted sky mask
NodeMult = nt.nodes.new(MathNode)
NodeMult.operation = 'MULTIPLY'
nt.links.new(node_rl.outputs['Depth'], NodeMult.inputs[0])
nt.links.new(NodeInv.outputs[0], NodeMult.inputs[1])
# Add 1000 to the sky:
NodeMult1000 = nt.nodes.new(MathNode)
NodeMult1000.operation = 'MULTIPLY'
NodeMult1000.inputs[0].default_value = 1000.0
nt.links.new(NodeMult1000.inputs[1], NodeSky.outputs[0])
NodeAdd1000 = nt.nodes.new(MathNode)
NodeAdd1000.operation = 'ADD'
NodeAdd1000.inputs[0].default_value = 1000.0
nt.links.new(NodeMult.outputs[0], NodeAdd1000.inputs[0])
nt.links.new(NodeMult1000.outputs[0], NodeAdd1000.inputs[1])
# Depth output node
DepthOut = nt.nodes.new(OutputFileNode)
DepthOut.location = bmu.Vector((900., 300.))
DepthOut.format.file_format = 'OPEN_EXR' # Changed 2012.10.24
if '/Masks/' in scn.render.filepath:
DepthOut.base_path = scn.render.filepath[0:-4] # get rid of "_m01"
DepthOut.base_path = DepthOut.base_path.replace('/Masks/', '/Zdepth/')+'_z'
elif '/Motion/' in scn.render.filepath:
DepthOut.base_path = scn.render.filepath[0:-4] # get rid of "_mot"
DepthOut.base_path = DepthOut.base_path.replace('/Motion/', '/Zdepth/')+'_z'
elif '/Normals/' in scn.render.filepath:
DepthOut.base_path = scn.render.filepath[0:-4] # get rid of "_nor"
DepthOut.base_path = DepthOut.base_path.replace('/Normals/', '/Zdepth/')+'_z'
else:
DepthOut.base_path = scn.render.filepath.replace('/Scenes/', '/Zdepth/')
# Set unique name per frame
endCut = DepthOut.base_path.index('Zdepth/')+len('Zdepth/')
DepthOut.file_slots[0].path = DepthOut.base_path[endCut:]+'_z'
# Set base path
DepthOut.base_path = DepthOut.base_path[:endCut]
nt.links.new(NodeAdd1000.outputs[0], DepthOut.inputs[0])
def add_normals(self, scn=None, single_output=False):
"""Adds compositor nodes to render out surface normals
Parameters
----------
scn : blender scene instance
scene to which to add normals. Defaults to current scene.
single_output : bool
Whether normals will be the only rendered output from the scene
"""
if not scn:
scn = bpy.context.scene
scn.use_nodes = True
scn.render.use_compositing = True
#####################################################################
### --- Set up render layers: --- ###
#####################################################################
RL = scn.render.layers.keys()
if not 'Normals' in RL:
bpy.ops.scene.render_layer_add() # Seems like there should be a "name" input argument, but not yet so we have to be hacky about this:
ob_layer = [x for x in scn.render.layers.keys() if not x in RL]
ob_layer = scn.render.layers[ob_layer[0]]
for k in self.DefaultLayerOpts.keys():
ob_layer.__setattr__(k, self.DefaultLayerOpts[k])
ob_layer.name = 'Normals'
RL.append('Normals')
ob_layer.use_ztransp = True # Necessary for Normals to work for transparent materials ?
ob_layer.use_pass_normal = True # Principal interest
ob_layer.use_pass_object_index = True # for masking out sky dome normals
else:
raise Exception('Normal layer already exists!')
########################################################################
### --- Set up compositor nodes: --- ###
########################################################################
# TO DO: Make a sensible layout for these, i.e. set .location field for all nodes (not urgent...)
nt = scn.node_tree
node_rl = nt.nodes.new(type=RLayerNode)
node_rl.layer = 'Normals'
# Normal output nodes
# (1) Split normal channels
NorSpl = nt.nodes.new(type=SepRGBANode)
nt.links.new(node_rl.outputs['Normal'], NorSpl.inputs['Image'])
NorSpl.location = node_rl.location + bmu.Vector((600., 0))
UpDown = [75., 0., -75.]
# (2) Zero out all normals on the sky dome (the sky doesn't really curve!)
NodeSky = nt.nodes.new(IDmaskNode)
NodeSky.use_antialiasing = True
NodeSky.index = 100
nt.links.new(node_rl.outputs['IndexOB'], NodeSky.inputs['ID value'])
NodeInv = nt.nodes.new(MathNode)
NodeInv.operation = 'SUBTRACT'
# Invert alpha layer, so sky values are zero
NodeInv.inputs[0].default_value = 1.0
nt.links.new(NodeSky.outputs[0], NodeInv.inputs[1])
# (3) re-combine to RGB image
NorCom = nt.nodes.new(type=CombRGBANode)
NorCom.location = node_rl.location + bmu.Vector((1050., 0.))
# Normal values go from -1 to 1, but image formats won't support that, so we will add 1
# and store a floating-point value from to 0-2 in an .hdr file
for iMap in range(3):
# For masking out sky:
NodeMult = nt.nodes.new(MathNode)
NodeMult.operation = 'MULTIPLY'
# For adding 1 to normal values:
NodeAdd1 = nt.nodes.new(MathNode)
NodeAdd1.operation = 'ADD'
NodeAdd1.inputs[1].default_value = 1.0
# Link nodes for order of computation:
# multiply by inverse of sky alpha:
nt.links.new(NorSpl.outputs['RGB'[iMap]], NodeMult.inputs[0])
nt.links.new(NodeInv.outputs['Value'], NodeMult.inputs[1])
# Add 1:
nt.links.new(NodeMult.outputs['Value'], NodeAdd1.inputs[0])
# Re-combine:
nt.links.new(NodeAdd1.outputs['Value'], NorCom.inputs['RGB'[iMap]])
# Normal output node
NorOut = nt.nodes.new(OutputFileNode)
NorOut.location = node_rl.location + bmu.Vector((1200., 0.))
NorOut.format.file_format = 'OPEN_EXR' #'PNG'
NorOut.name = 'fOutput Normals'
nt.links.new(NorCom.outputs['Image'], NorOut.inputs[0])
# If any other node is the principal node, replace (output folder) with /Normals/:
        if '/Masks/' in scn.render.filepath:
            NorOut.base_path = scn.render.filepath[0:-4] # get rid of "_m01"
            NorOut.base_path = NorOut.base_path.replace('/Masks/', '/Normals/')+'_nor'
        elif '/Motion/' in scn.render.filepath:
            NorOut.base_path = scn.render.filepath[0:-4] # get rid of "_mot"
            NorOut.base_path = NorOut.base_path.replace('/Motion/', '/Normals/')+'_nor'
        elif '/Zdepth/' in scn.render.filepath:
            NorOut.base_path = scn.render.filepath[0:-2] # get rid of "_z"
            NorOut.base_path = NorOut.base_path.replace('/Zdepth/', '/Normals/')+'_nor'
        else:
            NorOut.base_path = scn.render.filepath.replace('/Scenes/', '/Normals/')
# Set unique name per frame
print(NorOut.base_path)
endCut = NorOut.base_path.index('Normals/')+len('Normals/')
NorOut.file_slots[0].path = NorOut.base_path[endCut:]+'_nor'
# Set base path
NorOut.base_path = NorOut.base_path[:endCut]
nt.links.new(NorCom.outputs['Image'], NorOut.inputs[0])
def add_motion(self, scn=None, single_output=False):
"""Adds compositor nodes to render motion (optical flow, a.k.a. vector pass)
Parameters
----------
scn : bpy scene instance | None. default = None
Leave as default (None) for now. For potential future code upgrades
single_output : bool
Whether optical flow will be the only rendered output from the scene
"""
if not scn:
scn = bpy.context.scene
scn.use_nodes = True
scn.render.use_compositing = True
#####################################################################
### --- Set up render layers: --- ###
#####################################################################
RL = scn.render.layers.keys()
if not 'Motion' in RL:
bpy.ops.scene.render_layer_add()
# Seems like there should be a "name" input argument, but not yet so we have to be hacky about this:
ob_layer = [x for x in scn.render.layers.keys() if not x in RL]
ob_layer = scn.render.layers[ob_layer[0]]
# /Hacky
# Set default layer options
for k in self.DefaultLayerOpts.keys():
ob_layer.__setattr__(k, self.DefaultLayerOpts[k])
# And set motion-specific layer options
ob_layer.name = 'Motion'
ob_layer.use_pass_vector = True # Motion layer
ob_layer.use_ztransp = True # Necessary (?) for motion to work for transparent materials
ob_layer.use_pass_z = True # Necessary (?)
#ob_layer.use_pass_object_index = True # for masking out depth of sky dome
RL.append('Motion')
else:
raise Exception('Motion layer already exists!')
########################################################################
### --- Set up compositor nodes: --- ###
########################################################################
nt = scn.node_tree
# Get all node names (keys)
node_rl = nt.nodes.new(type=RLayerNode)
node_rl.layer = 'Motion'
# QUESTION: Better to zero out motion in sky?? NO for now,
# but leave here in case we want the option later...
if False:
# Zero out all depth info from the sky dome (the sky doesn't have any depth!)
NodeSky = nt.nodes.new(IDmaskNode)
NodeSky.use_antialiasing = False #No AA for z depth! doesn't work to combine non-AA node w/ AA node!
NodeSky.index = 100
nt.links.new(node_rl.outputs['IndexOB'], NodeSky.inputs['ID value'])
NodeInv = nt.nodes.new(MathNode)
NodeInv.operation = 'SUBTRACT'
# Invert (ID) alpha layer, so sky values are zero, objects/bg are 1
NodeInv.inputs[0].default_value = 1.0
nt.links.new(NodeSky.outputs[0], NodeInv.inputs[1])
# Mask out sky by multiplying with inverted sky mask
NodeMult = nt.nodes.new(MathNode)
NodeMult.operation = 'MULTIPLY'
nt.links.new(node_rl.outputs['Speed'], NodeMult.inputs[0])
nt.links.new(NodeInv.outputs[0], NodeMult.inputs[1])
# Add 1000 to the sky:
NodeMult1000 = nt.nodes.new(MathNode)
NodeMult1000.operation = 'MULTIPLY'
NodeMult1000.inputs[0].default_value = 1000.0
nt.links.new(NodeMult1000.inputs[1], NodeSky.outputs[0])
NodeAdd1000 = nt.nodes.new(MathNode)
NodeAdd1000.operation = 'ADD'
NodeAdd1000.inputs[0].default_value = 1000.0
nt.links.new(NodeMult.outputs[0], NodeAdd1000.inputs[0])
nt.links.new(NodeMult1000.outputs[0], NodeAdd1000.inputs[1])
# Depth output node
MotionOut = nt.nodes.new(OutputFileNode)
MotionOut.location = bmu.Vector((0., 300.))
MotionOut.format.file_format = 'OPEN_EXR'
        if '/Masks/' in scn.render.filepath:
            MotionOut.base_path = scn.render.filepath[0:-4] # get rid of "_m01"
            MotionOut.base_path = MotionOut.base_path.replace('/Masks/', '/Motion/')+'_mot'
        elif '/Normals/' in scn.render.filepath:
            MotionOut.base_path = scn.render.filepath[0:-4] # get rid of "_nor"
            MotionOut.base_path = MotionOut.base_path.replace('/Normals/', '/Motion/')+'_mot'
        elif '/Zdepth/' in scn.render.filepath:
            MotionOut.base_path = scn.render.filepath[0:-2] # get rid of "_z"
            MotionOut.base_path = MotionOut.base_path.replace('/Zdepth/', '/Motion/')+'_mot'
        else:
            MotionOut.base_path = scn.render.filepath.replace('/Scenes/', '/Motion/')
# Set unique name per frame
endCut = MotionOut.base_path.index('Motion/')+len('Motion/')
MotionOut.file_slots[0].path = MotionOut.base_path[endCut:]+'_mot'
# Set base path
MotionOut.base_path = MotionOut.base_path[:endCut]
nt.links.new(node_rl.outputs['Speed'], MotionOut.inputs[0])
def SetUpVoxelization(self, scn=None):
"""
Set up Blender for rendering images to create 3D voxelization of an object
NOTE: This sets up camera, rendering engine, and materials - NOT camera trajectory!
"""
#, xL=(-5, 5), yL=(-5, 5), zL=(0, 10), nGrid=10, fix=None
import math
if scn is None:
scn = bpy.context.scene
# Set renderer to cycles
scn.render.engine = 'CYCLES'
# Set camera to cycles, fisheye equisolid, 360 deg fov
Cam = [o for o in bpy.context.scene.objects if o.type=='CAMERA']
if len(Cam)==1:
Cam = Cam[0]
else:
raise Exception('Zero or >1 camera in your scene! WTF!!')
Cam.data.type='PANO'
Cam.data.cycles.fisheye_fov = math.pi*2.
Cam.data.cycles.panorama_type='FISHEYE_EQUISOLID'
# Get all-white cycles emission material
fpath = os.path.join(bvp_basedir, 'BlendFiles')
fName = 'Cycles_Render.blend'
MatNm = 'CycWhite'
bpy.ops.wm.link_append(
directory=os.path.join(fpath, fName)+"\\Material\\", # i.e., directory WITHIN .blend file (Scenes / Objects / Materials)
filepath="//"+fName+"\\Material\\"+'CycWhite', # local filepath within .blend file to the material to be imported
filename='CycWhite', # "filename" being the name of the data block, i.e. the name of the material.
link=False,
relative_path=False,
)
# For all dupli-objects in scene, create proxies
for bOb in bpy.context.scene.objects:
# Create proxies for all objects within object
if bOb.dupli_group:
for o in bOb.dupli_group.objects:
bvpu.blender.grab_only(bOb)
bpy.ops.object.proxy_make(object=o.name) #, object=bOb.name, type=o.name)
# Get rid of linked group now that dupli group objects are imported
bpy.context.scene.objects.unlink(bOb)
# Change all materials to white Cycles emission material ("CycWhite", imported above)
for nOb in bpy.context.scene.objects:
for m in nOb.material_slots:
m.material = bpy.data.materials['CycWhite']
@classmethod
def from_blend(cls, scn=None, bvp_params=None, blender_params=None):
"""Initialize render options from a given blend file
bvp_params : dict
bvp_params to override defaults
blender_params : dict
dict to override any params found in file
"""
pass
| bsd-2-clause | -3,798,760,241,647,120,400 | 48.347942 | 157 | 0.5579 | false |
Synerty/vortexpy | vortex/test/TuplePerfTest.py | 1 | 1588 | from datetime import datetime
import gc
import os
import psutil
import pytz
process = psutil.Process(os.getpid())
ITER_COUNT = 1000 * 1000 * 5
RESULT = None
def makeL(i):
# Use this line to negate the effect of the strings on the test
# return "Python is smart and will only create one string with this line"
# Use this if you want to see the difference with 5 million unique strings
return "This is a sample string %s" % i
def timeit(method):
def timed(*args, **kw):
global RESULT
RESULT = None
gc.collect()
s = datetime.now(pytz.utc)
startMem = process.memory_info().rss
RESULT = method(*args, **kw)
e = datetime.now(pytz.utc)
endMem = process.memory_info().rss
sizeMb = (endMem - startMem) / 1024 / 1024
sizeMbStr = "{0:,}".format(round(sizeMb, 2))
print('Time Taken = %s, \t%s, \tSize = %s' % (e - s, method.__name__, sizeMbStr))
return timed
from vortex.Tuple import Tuple, addTupleType, TupleHash
from vortex.Payload import Payload
@addTupleType
class X(Tuple):
__tupleType__ = "X"
__slots__ = ["i", "l"]
def __init__(self, i=None, l=None):
self.i, self.l = i, l
@timeit
def provile_dict_of_nt():
return [X(i=i, l=makeL(i)) for i in range(ITER_COUNT)]
if __name__ == "__main__":
provile_dict_of_nt()
tupleIn = X("val1", "val2")
encodedPayload = Payload(tuples=[tupleIn]).toEncodedPayload()
payload = Payload().fromEncodedPayload(encodedPayload)
assert TupleHash(tupleIn) == TupleHash(payload.tuples[0])
| mit | 8,186,198,323,836,041,000 | 22.701493 | 89 | 0.625945 | false |
go-python/gopy-gen | _examples/seqs/test.py | 2 | 1049 | # Copyright 2015 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
## py2/py3 compat
from __future__ import print_function
import sys
_PY3 = sys.version_info[0] == 3
if _PY3:
xrange = range
import seqs
### test docs
print("doc(seqs): %s" % repr(seqs.__doc__).lstrip('u'))
# note: arrays not settable from python -- use slices instead
# print("arr = seqs.Array(xrange(2))")
# arr = seqs.Array(xrange(2))
# print("arr = %s" % (arr,))
#
print("s = seqs.Slice()")
s = seqs.Slice()
print("s = %s" % (s,))
print("s = seqs.Slice([1,2])")
s = seqs.Slice([1,2])
print("s = %s" % (s,))
print("s = seqs.Slice(range(10))")
s = seqs.Slice(range(10))
print("s = %s" % (s,))
print("s = seqs.Slice(xrange(10))")
s = seqs.Slice(xrange(10))
print("s = %s" % (s,))
print("s = seqs.Slice()")
s = seqs.Slice()
print("s = %s" % (s,))
print("s += [1,2]")
s += [1,2]
print("s = %s" % (s,))
print("s += [10,20]")
s += [10,20]
print("s = %s" % (s,))
print("OK")
| bsd-3-clause | -7,480,195,438,124,358,000 | 19.98 | 61 | 0.580553 | false |
yuanming-hu/taichi | tests/python/test_grouped.py | 1 | 3479 | import taichi as ti
@ti.all_archs
def test_vector_index():
val = ti.field(ti.i32)
n = 4
m = 7
p = 11
ti.root.dense(ti.i, n).dense(ti.j, m).dense(ti.k, p).place(val)
@ti.kernel
def test():
for i in range(n):
for j in range(m):
for k in range(p):
I = ti.Vector([i, j, k])
val[I] = i + j * 2 + k * 3
test()
for i in range(n):
for j in range(m):
for k in range(p):
assert val[i, j, k] == i + j * 2 + k * 3
@ti.all_archs
def test_grouped():
val = ti.field(ti.i32)
n = 4
m = 8
p = 16
ti.root.dense(ti.i, n).dense(ti.j, m).dense(ti.k, p).place(val)
@ti.kernel
def test():
for I in ti.grouped(val):
val[I] = I[0] + I[1] * 2 + I[2] * 3
test()
for i in range(n):
for j in range(m):
for k in range(p):
assert val[i, j, k] == i + j * 2 + k * 3
@ti.all_archs
def test_grouped_ndrange():
val = ti.field(ti.i32)
n = 4
m = 8
ti.root.dense(ti.ij, (n, m)).place(val)
x0 = 2
y0 = 3
x1 = 1
y1 = 6
@ti.kernel
def test():
for I in ti.grouped(ti.ndrange((x0, y0), (x1, y1))):
val[I] = I[0] + I[1] * 2
test()
for i in range(n):
for j in range(m):
assert val[i, j] == (i +
j * 2 if x0 <= i < y0 and x1 <= j < y1 else 0)
@ti.all_archs
def test_static_grouped_ndrange():
val = ti.field(ti.i32)
n = 4
m = 8
ti.root.dense(ti.ij, (n, m)).place(val)
x0 = 2
y0 = 3
x1 = 1
y1 = 6
@ti.kernel
def test():
for I in ti.static(ti.grouped(ti.ndrange((x0, y0), (x1, y1)))):
val[I] = I[0] + I[1] * 2
test()
for i in range(n):
for j in range(m):
assert val[i, j] == (i +
j * 2 if x0 <= i < y0 and x1 <= j < y1 else 0)
@ti.all_archs
def test_grouped_ndrange_starred():
val = ti.field(ti.i32)
n = 4
m = 8
p = 16
dim = 3
ti.root.dense(ti.ijk, (n, m, p)).place(val)
@ti.kernel
def test():
for I in ti.grouped(ti.ndrange(*(((0, n), ) * dim))):
val[I] = I[0] + I[1] * 2 + I[2] * 3
test()
for i in range(n):
for j in range(m):
for k in range(p):
assert val[i, j,
k] == (i + j * 2 + k * 3 if j < n and k < n else 0)
@ti.all_archs
def test_grouped_ndrange_0d():
val = ti.field(ti.i32, shape=())
@ti.kernel
def test():
for I in ti.grouped(ti.ndrange()):
val[I] = 42
test()
assert val[None] == 42
@ti.all_archs
def test_static_grouped_ndrange_0d():
val = ti.field(ti.i32, shape=())
@ti.kernel
def test():
for I in ti.static(ti.grouped(ti.ndrange())):
val[I] = 42
test()
assert val[None] == 42
@ti.all_archs
def test_static_grouped_func():
K = 3
dim = 2
v = ti.Vector.field(K, dtype=ti.i32, shape=((K, ) * dim))
def stencil_range():
return ti.ndrange(*((K, ) * (dim + 1)))
@ti.kernel
def p2g():
for I in ti.static(ti.grouped(stencil_range())):
v[I[0], I[1]][I[2]] = I[0] + I[1] * 3 + I[2] * 10
p2g()
for i in range(K):
for j in range(K):
for k in range(K):
assert v[i, j][k] == i + j * 3 + k * 10
| mit | 2,686,256,191,990,908,400 | 18.220994 | 79 | 0.443806 | false |
harrissoerja/rapidpro | temba/msgs/migrations/0017_install_label_triggers.py | 2 | 4542 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_label_counts(apps, schema_editor):
"""
Iterate across all our labels, calculate how many visible, non-test messages are in them
"""
Label = apps.get_model('msgs', 'Label')
user_labels = Label.objects.filter(label_type='L')
for label in user_labels:
label.visible_count = label.msgs.filter(visibility='V', contact__is_test=False).count()
label.save(update_fields=('visible_count',))
print
print "Pre-calculated counts for %d labels" % len(user_labels)
class Migration(migrations.Migration):
dependencies = [
('msgs', '0016_label_visible_count'),
]
operations = [
migrations.RunPython(
populate_label_counts,
),
migrations.RunSQL(
# language=SQL
"""
CREATE OR REPLACE FUNCTION update_label_count() RETURNS TRIGGER AS $$
DECLARE
is_included boolean;
BEGIN
-- label applied to message
IF TG_TABLE_NAME = 'msgs_msg_labels' AND TG_OP = 'INSERT' THEN
-- is this message visible and non-test?
SELECT (msgs_msg.visibility = 'V' AND NOT contacts_contact.is_test) INTO STRICT is_included
FROM msgs_msg
INNER JOIN contacts_contact ON contacts_contact.id = msgs_msg.contact_id
WHERE msgs_msg.id = NEW.msg_id;
IF is_included THEN
UPDATE msgs_label SET visible_count = visible_count + 1 WHERE id=NEW.label_id;
END IF;
-- label removed from message
ELSIF TG_TABLE_NAME = 'msgs_msg_labels' AND TG_OP = 'DELETE' THEN
-- is this message visible and non-test?
SELECT (msgs_msg.visibility = 'V' AND NOT contacts_contact.is_test) INTO STRICT is_included
FROM msgs_msg
INNER JOIN contacts_contact ON contacts_contact.id = msgs_msg.contact_id
WHERE msgs_msg.id = OLD.msg_id;
IF is_included THEN
UPDATE msgs_label SET visible_count = visible_count - 1 WHERE id=OLD.label_id;
END IF;
-- no more labels for any messages
ELSIF TG_TABLE_NAME = 'msgs_msg_labels' AND TG_OP = 'TRUNCATE' THEN
UPDATE msgs_label SET visible_count = 0;
-- message visibility may have changed
ELSIF TG_TABLE_NAME = 'msgs_msg' AND TG_OP = 'UPDATE' THEN
-- is being archived (i.e. no longer included)
IF OLD.visibility = 'V' AND NEW.visibility = 'A' THEN
UPDATE msgs_label SET visible_count = msgs_label.visible_count - 1
FROM msgs_msg_labels
WHERE msgs_msg_labels.label_id = msgs_label.id AND msgs_msg_labels.msg_id = NEW.id;
END IF;
-- is being restored (i.e. now included)
IF OLD.visibility = 'A' AND NEW.visibility = 'V' THEN
UPDATE msgs_label SET visible_count = msgs_label.visible_count + 1
FROM msgs_msg_labels
WHERE msgs_msg_labels.label_id = msgs_label.id AND msgs_msg_labels.msg_id = NEW.id;
END IF;
END IF;
RETURN NULL;
END;
$$ LANGUAGE plpgsql;
-- Install INSERT and DELETE triggers for msgs_msg_labels
DROP TRIGGER IF EXISTS when_label_inserted_or_deleted_then_update_count_trg ON msgs_msg_labels;
CREATE TRIGGER when_label_inserted_or_deleted_then_update_count_trg
AFTER INSERT OR DELETE ON msgs_msg_labels
FOR EACH ROW EXECUTE PROCEDURE update_label_count();
-- Install TRUNCATE trigger for msgs_msg_labels
DROP TRIGGER IF EXISTS when_labels_truncated_then_update_count_trg ON msgs_msg_labels;
CREATE TRIGGER when_labels_truncated_then_update_count_trg
AFTER TRUNCATE ON msgs_msg_labels
EXECUTE PROCEDURE update_label_count();
-- Install UPDATE trigger for msgs_msg
DROP TRIGGER IF EXISTS when_msg_updated_then_update_label_counts_trg ON msgs_msg;
CREATE TRIGGER when_msg_updated_then_update_label_counts_trg
AFTER UPDATE OF visibility ON msgs_msg
FOR EACH ROW EXECUTE PROCEDURE update_label_count();
"""
),
]
| agpl-3.0 | -5,752,431,562,816,652,000 | 42.257143 | 107 | 0.581902 | false |
Stanford-Online/edx-platform | lms/djangoapps/support/urls.py | 10 | 1447 | """
URLs for the student support app.
"""
from django.conf.urls import url
from lms.djangoapps.support.views.contact_us import ContactUsView
from support.views.certificate import CertificatesSupportView
from support.views.course_entitlements import EntitlementSupportView
from support.views.enrollments import EnrollmentSupportListView, EnrollmentSupportView
from support.views.index import index
from support.views.manage_user import ManageUserDetailView, ManageUserSupportView
from support.views.refund import RefundSupportView
COURSE_ENTITLEMENTS_VIEW = EntitlementSupportView.as_view()
app_name = 'support'
urlpatterns = [
url(r'^$', index, name="index"),
url(r'^certificates/?$', CertificatesSupportView.as_view(), name="certificates"),
url(r'^refund/?$', RefundSupportView.as_view(), name="refund"),
url(r'^enrollment/?$', EnrollmentSupportView.as_view(), name="enrollment"),
url(r'^course_entitlement/?$', COURSE_ENTITLEMENTS_VIEW, name="course_entitlement"),
url(r'^contact_us/?$', ContactUsView.as_view(), name="contact_us"),
url(
r'^enrollment/(?P<username_or_email>[\w.@+-]+)?$',
EnrollmentSupportListView.as_view(),
name="enrollment_list"
),
url(r'^manage_user/?$', ManageUserSupportView.as_view(), name="manage_user"),
url(
r'^manage_user/(?P<username_or_email>[\w.@+-]+)?$',
ManageUserDetailView.as_view(),
name="manage_user_detail"
),
]
| agpl-3.0 | 7,024,482,962,946,849,000 | 40.342857 | 88 | 0.712509 | false |
BGS/pyLauncher | pyl_ui/options_ui.py | 1 | 11888 | '''
pyLauncher: Windows Application Launcher
Copyright (C) Blaga Florentin Gabriel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from pyl_core.pyl_config_parser import Parser
from pyl_core.pyl_winreg import addToRegistry, removeFromRegistry
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
import os
import sys
class Ui_Options(QtGui.QWidget):
def __init__(self, main_window_instance=None, parent=None):
super(Ui_Options, self).__init__(parent)
self.setWindowTitle("pyLauncher | Options")
self.main_window_instance = main_window_instance
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.resize(419, 243)
self.pylMainTab = QtGui.QTabWidget(self)
self.pylMainTab.setGeometry(QtCore.QRect(0, 10, 421, 241))
self.optionsMainTab = QtGui.QWidget()
self.autoUpdatecheckBox = QtGui.QCheckBox(self.optionsMainTab)
self.autoUpdatecheckBox.setGeometry(QtCore.QRect(10, 30, 161, 18))
self.startWithWindowsCheckbox = QtGui.QCheckBox(self.optionsMainTab)
self.startWithWindowsCheckbox.setGeometry(QtCore.QRect(10, 70, 121, 18))
self.numberOfResultsDisplayed = QtGui.QSpinBox(self.optionsMainTab)
self.numberOfResultsDisplayed.setGeometry(QtCore.QRect(350, 30, 46, 22))
self.checkBoxStayOnTop = QtGui.QCheckBox(self.optionsMainTab)
self.checkBoxStayOnTop.setGeometry(QtCore.QRect(10, 110, 131, 18))
self.numResultDispalyed = QtGui.QLabel(self.optionsMainTab)
self.numResultDispalyed.setGeometry(QtCore.QRect(190, 30, 141, 21))
self.tipsCheckBox = QtGui.QCheckBox("Show tips on start up", self.optionsMainTab)
self.tipsCheckBox.setGeometry(10, 140, 131, 18)
self.autoUpdatepyLauncher = QtGui.QCheckBox(self.optionsMainTab)
self.autoUpdatepyLauncher.setGeometry(QtCore.QRect(190, 70, 161, 18))
self.transpLabel = QtGui.QLabel("Transparency:", self.optionsMainTab)
self.transpLabel.setGeometry(QtCore.QRect(180, 111, 71, 20))
self.transparencySpinBox = QtGui.QDoubleSpinBox(self.optionsMainTab)
self.transparencySpinBox.setGeometry(QtCore.QRect(260, 110, 62, 22))
self.transparencySpinBox.setMinimum(0.1)
self.transparencySpinBox.setMaximum(1.0)
self.transparencySpinBox.setDecimals(1)
self.transparencySpinBox.setSingleStep(0.1)
self.transparencySpinBox.setRange(0.1, 1.0)
self.label_3 = QtGui.QLabel(self.optionsMainTab)
self.label_3.setGeometry(QtCore.QRect(70, 150, 241, 61))
        font = QtGui.QFont()
font.setFamily(_fromUtf8("Segoe Print"))
font.setPointSize(28)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.pylMainTab.addTab(self.optionsMainTab, _fromUtf8("About pyLauncher"))
self.pylPluginsTab = QtGui.QWidget()
self.availPlugsBox = QtGui.QGroupBox(self.pylPluginsTab)
self.availPlugsBox.setGeometry(QtCore.QRect(10, 20, 391, 181))
self.tableView = QtGui.QTableWidget(self.availPlugsBox)
self.tableView.setGeometry(QtCore.QRect(10, 20, 371, 151))
self.tableView.setColumnCount(3)
self.pylMainTab.addTab(self.pylPluginsTab, _fromUtf8("About pyLauncher2"))
self.pylAbout = QtGui.QWidget()
self.label = QtGui.QLabel(self.pylAbout)
self.label.setGeometry(QtCore.QRect(80, 0, 241, 61))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Segoe Print"))
font.setPointSize(28)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label_2 = QtGui.QLabel(self.pylAbout)
self.label_2.setGeometry(QtCore.QRect(10, 20, 391, 181))
font = QtGui.QFont()
self.label_2.setFont(font)
self.label_2.setWordWrap(True)
self.label_2.setIndent(7)
self.label_2.setOpenExternalLinks(True)
self.pylMainTab.addTab(self.pylAbout, _fromUtf8("About pyLauncher"))
self.pylMainTab.setCurrentIndex(0)
self.parser = Parser()
if self.parser.read_value('auto_update', 'True', 'str') == 'True':
self.autoUpdatepyLauncher.toggle()
if self.parser.read_value('autosync', 'True', 'str') == 'True':
self.autoUpdatecheckBox.toggle()
if self.parser.read_value('autorun', 'True', 'str') == 'True':
self.startWithWindowsCheckbox.toggle()
if self.parser.read_value('always_on_top', 'True', 'str') == 'True':
self.checkBoxStayOnTop.toggle()
if self.parser.read_value('show_tips', 'True', 'str') == 'True':
self.tipsCheckBox.toggle()
self.numberOfResultsDisplayed.setValue(self.parser.read_value('max_results', 5, 'int'))
self.numberOfResultsDisplayed.setMinimum(1)
self.transparencySpinBox.setValue(self.parser.read_value('transparency', 0.8, 'float'))
self.retranslateUi()
self.setPluginInfo()
QtCore.QMetaObject.connectSlotsByName(self)
self.connectSignals()
def setPluginInfo(self):
plugin_info = self.main_window_instance.getPluginInformation()
self.tableView.setHorizontalHeaderLabels(['Plugin', 'Author', 'Version'])
for plugin in plugin_info.keys():
plugin_info_list = plugin_info[plugin]
lastrow = self.tableView.rowCount()
self.tableView.insertRow(lastrow)
self.tableView.setItem(lastrow, 0, QtGui.QTableWidgetItem(plugin_info_list[0]))
self.tableView.setItem(lastrow, 1, QtGui.QTableWidgetItem(plugin_info_list[1]))
self.tableView.setItem(lastrow, 2, QtGui.QTableWidgetItem(plugin_info_list[2]))
def connectSignals(self):
self.checkBoxStayOnTop.stateChanged.connect(self.stayOnTopCheckBox)
self.startWithWindowsCheckbox.stateChanged.connect(self.autoStartCheckBox)
self.autoUpdatecheckBox.stateChanged.connect(self._autoSyncCheckBox)
self.numberOfResultsDisplayed.valueChanged.connect(self.updateNumberOfResults)
self.autoUpdatepyLauncher.stateChanged.connect(self.autoUpdateStateChanged)
self.transparencySpinBox.valueChanged.connect(self.updateTransparency)
self.tipsCheckBox.stateChanged.connect(self.tipsCheckBoxState)
def tipsCheckBoxState(self, state):
if state == QtCore.Qt.Checked:
self.parser.set_value('show_tips', 'True')
else:
self.parser.set_value('show_tips', 'False')
def updateTransparency(self, value):
self.parser.set_value('transparency', value)
self.main_window_instance.setWindowOpacity(value)
def autoUpdateStateChanged(self, state):
if state == QtCore.Qt.Checked:
self.parser.set_value('auto_update', 'True')
else:
self.parser.set_value('auto_update', 'False')
def updateNumberOfResults(self):
self.parser.set_value('max_results', self.numberOfResultsDisplayed.value())
def _autoSyncCheckBox(self, state):
if state == QtCore.Qt.Checked:
self.parser.set_value('autosync', 'True')
else:
self.parser.set_value('autosync', 'False')
def autoStartCheckBox(self, state):
if state == QtCore.Qt.Checked:
self.parser.set_value('autorun', 'True')
addToRegistry(os.path.realpath(sys.argv[0]))
else:
self.parser.set_value('autorun', 'False')
removeFromRegistry()
def stayOnTopCheckBox(self, state):
if state == QtCore.Qt.Checked:
self.parser.set_value('always_on_top', 'True')
else:
self.parser.set_value('always_on_top', 'False')
def retranslateUi(self):
self.autoUpdatecheckBox.setText(QtGui.QApplication.translate("Options", "Auto synchronise Catalog", None, QtGui.QApplication.UnicodeUTF8))
self.startWithWindowsCheckbox.setText(QtGui.QApplication.translate("Options", "Start with Windows", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxStayOnTop.setText(QtGui.QApplication.translate("Options", "Stay always on top", None, QtGui.QApplication.UnicodeUTF8))
self.numResultDispalyed.setText(QtGui.QApplication.translate("Options", "Number of results displayed:", None, QtGui.QApplication.UnicodeUTF8))
self.autoUpdatepyLauncher.setText(QtGui.QApplication.translate("Options", "Auto Update on new version", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Options", "pyLauncher", None, QtGui.QApplication.UnicodeUTF8))
self.pylMainTab.setTabText(self.pylMainTab.indexOf(self.optionsMainTab), QtGui.QApplication.translate("Options", "pyLauncher Main", None, QtGui.QApplication.UnicodeUTF8))
self.availPlugsBox.setTitle(QtGui.QApplication.translate("Options", "Available Plugins", None, QtGui.QApplication.UnicodeUTF8))
self.pylMainTab.setTabText(self.pylMainTab.indexOf(self.pylPluginsTab), QtGui.QApplication.translate("Options", "pyLauncher Plugins", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Options", "pyLauncher", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Options", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt; font-weight:600;\">pyLauncher</span><span style=\" font-size:10pt;\"> is a free utility for </span><span style=\" font-size:10pt; font-weight:600;\">Microsoft Window</span><span style=\" font-size:10pt;\">s designed to help you forget about your Start Menu, the Icons on your Desktop, </span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">and even your File Manager.</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:10pt;\">If you want to help improve pyLauncher or fill a bug report please visit: </span><a href=\"https://github.com/BGS/pyLauncher\"><span style=\" font-size:10pt; text-decoration: underline; color:#0000ff;\">pyLauncher Home Page</span></a></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.pylMainTab.setTabText(self.pylMainTab.indexOf(self.pylAbout), QtGui.QApplication.translate("Options", "About pyLauncher", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -7,839,995,187,203,604,000 | 47.522449 | 469 | 0.681023 | false |
dakcarto/suite-qgis-plugin | src/opengeo/gui/dialogs/projectdialog.py | 1 | 4580 | from PyQt4 import QtGui, QtCore
class PublishProjectDialog(QtGui.QDialog):
def __init__(self, catalogs, parent = None):
super(PublishProjectDialog, self).__init__(parent)
self.catalogs = catalogs
self.catalog = None
self.workspace = None
self.text = None
self.initGui()
def initGui(self):
layout = QtGui.QVBoxLayout()
self.setWindowTitle('Publish project')
verticalLayout = QtGui.QVBoxLayout()
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
catalogLabel = QtGui.QLabel('Catalog')
self.catalogBox = QtGui.QComboBox()
self.catalogBox.addItems(self.catalogs.keys())
self.catalogBox.currentIndexChanged.connect(self.catalogHasChanged)
horizontalLayout.addWidget(catalogLabel)
horizontalLayout.addWidget(self.catalogBox)
verticalLayout.addLayout(horizontalLayout)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
workspaceLabel = QtGui.QLabel('Workspace')
self.workspaceBox = QtGui.QComboBox()
cat = self.catalogs[self.catalogs.keys()[0]]
self.workspaces = cat.get_workspaces()
try:
defaultWorkspace = cat.get_default_workspace()
defaultWorkspace.fetch()
defaultName = defaultWorkspace.dom.find('name').text
except:
defaultName = None
workspaceNames = [w.name for w in self.workspaces]
self.workspaceBox.addItems(workspaceNames)
if defaultName is not None:
self.workspaceBox.setCurrentIndex(workspaceNames.index(defaultName))
horizontalLayout.addWidget(workspaceLabel)
horizontalLayout.addWidget(self.workspaceBox)
verticalLayout.addLayout(horizontalLayout)
self.destGroupBox = QtGui.QGroupBox()
self.destGroupBox.setLayout(verticalLayout)
verticalLayout = QtGui.QVBoxLayout()
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
groupLabel = QtGui.QLabel('Global group name')
self.groupNameBox = QtGui.QLineEdit()
self.groupNameBox.setPlaceholderText("[leave empty if no global group should be created]")
horizontalLayout.addWidget(groupLabel)
horizontalLayout.addWidget(self.groupNameBox)
verticalLayout.addLayout(horizontalLayout)
self.groupGroupBox = QtGui.QGroupBox()
self.groupGroupBox.setLayout(verticalLayout)
layout.addWidget(self.destGroupBox)
layout.addWidget(self.groupGroupBox)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Close)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
self.buttonBox.accepted.connect(self.okPressed)
self.buttonBox.rejected.connect(self.cancelPressed)
self.resize(400,200)
def catalogHasChanged(self):
catalog = self.catalogs[self.catalogBox.currentText()]
self.workspaces = catalog.get_workspaces()
try:
defaultWorkspace = catalog.get_default_workspace()
defaultWorkspace.fetch()
defaultName = defaultWorkspace.dom.find('name').text
except:
defaultName = None
workspaceNames = [w.name for w in self.workspaces]
self.workspaceBox.clear()
self.workspaceBox.addItems(workspaceNames)
if defaultName is not None:
self.workspaceBox.setCurrentIndex(workspaceNames.index(defaultName))
def okPressed(self):
self.catalog = self.catalogs[self.catalogBox.currentText()]
self.workspace = self.workspaces[self.workspaceBox.currentIndex()]
self.groupName = self.groupNameBox.text()
if self.groupName.strip() == "":
self.groupName = None
self.close()
def cancelPressed(self):
self.catalog = None
self.workspace = None
self.text = None
self.close()
| gpl-2.0 | -3,875,909,941,491,516,400 | 40.261261 | 126 | 0.604148 | false |
junhuac/MQUIC | src/tools/perf/benchmarks/dromaeo.py | 2 | 9603 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import os
from core import perf_benchmark
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_test
from telemetry import story
from telemetry.value import scalar
from metrics import power
class _DromaeoMeasurement(page_test.PageTest):
def __init__(self):
super(_DromaeoMeasurement, self).__init__()
self._power_metric = None
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def WillStartBrowser(self, platform):
self._power_metric = power.PowerMetric(platform)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.document.getElementById("pause") &&' +
'window.document.getElementById("pause").value == "Run"',
120)
# Start spying on POST request that will report benchmark results, and
# intercept result data.
tab.ExecuteJavaScript('(function() {' +
' var real_jquery_ajax_ = window.jQuery;' +
' window.results_ = "";' +
' window.jQuery.ajax = function(request) {' +
' if (request.url == "store.php") {' +
' window.results_ =' +
' decodeURIComponent(request.data);' +
' window.results_ = window.results_.substring(' +
' window.results_.indexOf("=") + 1, ' +
' window.results_.lastIndexOf("&"));' +
' real_jquery_ajax_(request);' +
' }' +
' };' +
'})();')
# Starts benchmark.
tab.ExecuteJavaScript('window.document.getElementById("pause").click();')
tab.WaitForJavaScriptExpression('!!window.results_', 600)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
score = eval(tab.EvaluateJavaScript('window.results_ || "[]"'))
def Escape(k):
chars = [' ', '.', '-', '/', '(', ')', '*']
for c in chars:
k = k.replace(c, '_')
return k
def AggregateData(container, key, value):
if key not in container:
container[key] = {'count': 0, 'sum': 0}
container[key]['count'] += 1
container[key]['sum'] += math.log(value)
suffix = page.url[page.url.index('?') + 1:]
def AddResult(name, value):
important = False
if name == suffix:
important = True
results.AddValue(scalar.ScalarValue(
results.current_page, Escape(name), 'runs/s', value, important))
aggregated = {}
for data in score:
AddResult('%s/%s' % (data['collection'], data['name']),
data['mean'])
top_name = data['collection'].split('-', 1)[0]
AggregateData(aggregated, top_name, data['mean'])
collection_name = data['collection']
AggregateData(aggregated, collection_name, data['mean'])
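    # Each aggregated score reported below is exp(mean(log(x))), i.e. the
    # geometric mean of the individual runs/s values collected for that group.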
for key, value in aggregated.iteritems():
AddResult(key, math.exp(value['sum'] / value['count']))
class _DromaeoBenchmark(perf_benchmark.PerfBenchmark):
"""A base class for Dromaeo benchmarks."""
test = _DromaeoMeasurement
@classmethod
def Name(cls):
return 'dromaeo'
def CreateStorySet(self, options):
"""Makes a PageSet for Dromaeo benchmarks."""
# Subclasses are expected to define class members called query_param and
# tag.
if not hasattr(self, 'query_param') or not hasattr(self, 'tag'):
raise NotImplementedError('query_param or tag not in Dromaeo benchmark.')
archive_data_file = '../page_sets/data/dromaeo.%s.json' % self.tag
ps = story.StorySet(
archive_data_file=archive_data_file,
base_dir=os.path.dirname(os.path.abspath(__file__)),
cloud_storage_bucket=story.PUBLIC_BUCKET)
url = 'http://dromaeo.com?%s' % self.query_param
ps.AddStory(page_module.Page(
url, ps, ps.base_dir, make_javascript_deterministic=False))
return ps
class DromaeoDomCoreAttr(_DromaeoBenchmark):
"""Dromaeo DOMCore attr JavaScript benchmark.
Tests setting and getting DOM node attributes.
"""
tag = 'domcoreattr'
query_param = 'dom-attr'
@classmethod
def Name(cls):
return 'dromaeo.domcoreattr'
class DromaeoDomCoreModify(_DromaeoBenchmark):
"""Dromaeo DOMCore modify JavaScript benchmark.
Tests creating and injecting DOM nodes.
"""
tag = 'domcoremodify'
query_param = 'dom-modify'
@classmethod
def Name(cls):
return 'dromaeo.domcoremodify'
class DromaeoDomCoreQuery(_DromaeoBenchmark):
"""Dromaeo DOMCore query JavaScript benchmark.
Tests querying DOM elements in a document.
"""
tag = 'domcorequery'
query_param = 'dom-query'
@classmethod
def Name(cls):
return 'dromaeo.domcorequery'
class DromaeoDomCoreTraverse(_DromaeoBenchmark):
"""Dromaeo DOMCore traverse JavaScript benchmark.
Tests traversing a DOM structure.
"""
tag = 'domcoretraverse'
query_param = 'dom-traverse'
@classmethod
def Name(cls):
return 'dromaeo.domcoretraverse'
@benchmark.Disabled('win') # crbug.com/523276
class DromaeoJslibAttrJquery(_DromaeoBenchmark):
"""Dromaeo JSLib attr jquery JavaScript benchmark.
Tests setting and getting DOM node attributes using the jQuery JavaScript
Library.
"""
tag = 'jslibattrjquery'
query_param = 'jslib-attr-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibattrjquery'
class DromaeoJslibAttrPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib attr prototype JavaScript benchmark.
  Tests setting and getting DOM node attributes using the Prototype JavaScript
Library.
"""
tag = 'jslibattrprototype'
query_param = 'jslib-attr-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibattrprototype'
@benchmark.Disabled('win') # crbug.com/523276
class DromaeoJslibEventJquery(_DromaeoBenchmark):
"""Dromaeo JSLib event jquery JavaScript benchmark.
Tests binding, removing, and triggering DOM events using the jQuery JavaScript
Library.
"""
tag = 'jslibeventjquery'
query_param = 'jslib-event-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibeventjquery'
class DromaeoJslibEventPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib event prototype JavaScript benchmark.
Tests binding, removing, and triggering DOM events using the Prototype
JavaScript Library.
"""
tag = 'jslibeventprototype'
query_param = 'jslib-event-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibeventprototype'
# win: http://crbug.com/479796, http://crbug.com/529330, http://crbug.com/598705
# android: http://crbug.com/503138
# linux: http://crbug.com/583075
@benchmark.Disabled('win-reference', 'android', 'linux')
class DromaeoJslibModifyJquery(_DromaeoBenchmark):
"""Dromaeo JSLib modify jquery JavaScript benchmark.
Tests creating and injecting DOM nodes into a document using the jQuery
JavaScript Library.
"""
tag = 'jslibmodifyjquery'
query_param = 'jslib-modify-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibmodifyjquery'
class DromaeoJslibModifyPrototype(_DromaeoBenchmark):
"""Dromaeo JSLib modify prototype JavaScript benchmark.
Tests creating and injecting DOM nodes into a document using the Prototype
JavaScript Library.
"""
tag = 'jslibmodifyprototype'
query_param = 'jslib-modify-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibmodifyprototype'
@benchmark.Disabled('win') # crbug.com/523276
class DromaeoJslibStyleJquery(_DromaeoBenchmark):
"""Dromaeo JSLib style jquery JavaScript benchmark.
Tests getting and setting CSS information on DOM elements using the jQuery
JavaScript Library.
"""
tag = 'jslibstylejquery'
query_param = 'jslib-style-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibstylejquery'
class DromaeoJslibStylePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib style prototype JavaScript benchmark.
  Tests getting and setting CSS information on DOM elements using the Prototype
JavaScript Library.
"""
tag = 'jslibstyleprototype'
query_param = 'jslib-style-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibstyleprototype'
class DromaeoJslibTraverseJquery(_DromaeoBenchmark):
"""Dromaeo JSLib traverse jquery JavaScript benchmark.
  Tests traversing a DOM structure using the jQuery JavaScript Library.
"""
tag = 'jslibtraversejquery'
query_param = 'jslib-traverse-jquery'
@classmethod
def Name(cls):
return 'dromaeo.jslibtraversejquery'
class DromaeoJslibTraversePrototype(_DromaeoBenchmark):
"""Dromaeo JSLib traverse prototype JavaScript benchmark.
  Tests traversing a DOM structure using the Prototype JavaScript Library.
"""
tag = 'jslibtraverseprototype'
query_param = 'jslib-traverse-prototype'
@classmethod
def Name(cls):
return 'dromaeo.jslibtraverseprototype'
class DromaeoCSSQueryJquery(_DromaeoBenchmark):
"""Dromaeo CSS Query jquery JavaScript benchmark.
  Tests querying DOM elements in a document using the jQuery JavaScript Library.
"""
tag = 'cssqueryjquery'
query_param = 'cssquery-jquery'
@classmethod
def Name(cls):
return 'dromaeo.cssqueryjquery'
| mit | 3,813,881,779,592,105,000 | 27.580357 | 80 | 0.679059 | false |
ingenioustechie/zamboni | mkt/webapps/management/commands/generate_receipts.py | 18 | 3508 | import json
import os
import tempfile
import time
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from mkt.users.models import UserProfile
from mkt.webapps.models import Installed, Webapp
class Command(BaseCommand):
"""
Used to generate a whole pile of receipts that can be used for
load testing. The receipts need to be generated because they use
the receipt key for the particular server.
This will create users, addons and installed records, so that the
verify script can be load tested properly.
These records are placed into a JSON file. Run the delete command
to clean these out afterwards.
"""
option_list = BaseCommand.option_list + (
make_option('--action', action='store', type='string',
dest='action', help='Action: create, delete.'),
make_option('--dir', action='store', type='string',
dest='dir', help='Directory to read or write data.'),
make_option('--number', action='store', type='int',
default='10', dest='number',
help='Number of receipts, default: %default')
)
def filename(self, rest):
return os.path.join(self.dest, rest)
def handle(self, *args, **options):
self.dest = options.get('dir')
action = options.get('action')
if action not in ['create', 'delete']:
raise CommandError('Action: create or delete')
if not self.dest:
self.dest = tempfile.mkdtemp()
print '--dir not specified, using: %s' % self.dest
if not os.path.exists(self.dest):
print 'Creating output directory, %s' % self.dest
os.makedirs(self.dest)
self.number = options.get('number')
return getattr(self, action)()
def create(self):
"""
Creates users, webapps and installed records. Outputs the receipts
and created records into the supplied directory.
"""
created = {'users': [], 'webapps': [], 'installed': []}
number = self.number
stamp = str(time.time())
for x in xrange(number):
name = 'generate-receipt-%s-%s' % (stamp, x)
user = UserProfile.objects.create(email='%[email protected]' % name)
created['users'].append(user.pk)
for x in xrange(number):
name = 'generate-receipt-%s-%s' % (stamp, x)
addon = Webapp.objects.create(name=name,
manifest_url='http://a.com/m.webapp')
created['webapps'].append(addon.pk)
for x in xrange(number):
installed = Installed.objects.create(
addon_id=created['webapps'][x],
user_id=created['users'][x])
created['installed'].append(installed.pk)
filename = self.filename('%s.%s.receipt' %
(created['webapps'][x], x))
open(filename, 'w').write(installed.receipt)
open(self.filename('created.json'), 'w').write(json.dumps(created))
def delete(self):
"""Cleans up once the load testing is run and deletes the records."""
data = json.loads(open(self.filename('created.json'), 'r').read())
for obj, model in (['installed', Installed],
['webapps', Webapp],
['users', UserProfile]):
model.objects.filter(pk__in=data[obj]).delete()
| bsd-3-clause | -6,061,104,202,163,082,000 | 37.130435 | 79 | 0.580103 | false |
pydata/conf_site | symposion/proposals/templatetags/proposal_tags.py | 1 | 2338 | from django import template
from symposion.proposals.models import AdditionalSpeaker
register = template.Library()
class AssociatedProposalsNode(template.Node):
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) == 3 and bits[1] == "as":
return cls(bits[2])
else:
raise template.TemplateSyntaxError("%r takes 'as var'" % bits[0])
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
request = context["request"]
if request.user.speaker_profile:
pending = AdditionalSpeaker.SPEAKING_STATUS_ACCEPTED
speaker = request.user.speaker_profile
queryset = AdditionalSpeaker.objects.filter(
speaker=speaker, status=pending
)
context[self.context_var] = [
item.proposalbase for item in queryset
]
else:
context[self.context_var] = None
return u""
class PendingProposalsNode(template.Node):
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) == 3 and bits[1] == "as":
return cls(bits[2])
else:
raise template.TemplateSyntaxError("%r takes 'as var'" % bits[0])
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
request = context["request"]
if request.user.speaker_profile:
pending = AdditionalSpeaker.SPEAKING_STATUS_PENDING
speaker = request.user.speaker_profile
queryset = AdditionalSpeaker.objects.filter(
speaker=speaker, status=pending
)
context[self.context_var] = [
item.proposalbase for item in queryset
]
else:
context[self.context_var] = None
return u""
@register.tag
def pending_proposals(parser, token):
"""
{% pending_proposals as pending_proposals %}
"""
return PendingProposalsNode.handle_token(parser, token)
@register.tag
def associated_proposals(parser, token):
"""
{% associated_proposals as associated_proposals %}
"""
return AssociatedProposalsNode.handle_token(parser, token)
| mit | -2,728,229,123,176,684,000 | 28.974359 | 77 | 0.606501 | false |
chen2aaron/SnirteneCodes | Spider/souche_spider.py | 1 | 5865 | # -*- coding:utf-8 -*-
import requests
import re
import json
from time import sleep
from Utils import torndb, sql
"""
brands_list_url: souche.com endpoint for fetching the list of car brands
Request parameters:
    'type': 'car-subdivision',
Returns JSON of the form:
{'items': [{'check': None,
'code': 'brand-170',
'dindex': 2147483647,
'enName': '中兴',
'extString': '{pinyin=zhongxing}',
'id': None,
'level': None,
'name': 'Z 中兴',
'parentCode': None,
'type': None,
'updateDate': None},
]
}
brand_url: fetches the series under a given car brand
Request parameters:
    'type': 'car-subdivision',
    'code': brand-code,
Returns JSON of the form:
{
'keys': ['阿尔法罗密欧'],
'codes': {'阿尔法罗密欧': [{'check': None,
'code': 'series-671',
'dindex': 2147483647,
'enName': 'ALFA 156',
'extString': '{factory=阿尔法罗密欧, pinyin=ALFA 156}',
'id': None,
'level': None,
'name': 'ALFA 156',
'parentCode': None,
'type': None,
'updateDate': None},
{'check': None,
'code': 'series-672',
'dindex': 2147483647,
'enName': 'ALFA 166',
'extString': '{factory=阿尔法罗密欧, pinyin=ALFA 166}',
'id': None,
'level': None,
'name': 'ALFA 166',
'parentCode': None,
'type': None,
'updateDate': None},
{'check': None,
'code': 'series-1222',
'dindex': 2147483647,
'enName': 'ALFA GT',
'extString': '{factory=阿尔法罗密欧, pinyin=ALFA GT}',
'id': None,
'level': None,
'name': 'ALFA GT',
'parentCode': None,
'type': None,
'updateDate': None}]},
}
series_url: fetches the vehicle models under a given series
Request parameters:
    'type': 'car-subdivision',
    'code': series-code,
Returns JSON of the form:
{
'items': [{'check': None,
'code': '32632',
'dindex': 2147483647,
'enName': '2014款 Vanquish 6.0L 百年纪念版',
'extString': '{}',
'id': None,
'level': None,
'name': '2014款 Vanquish 6.0L 百年纪念版',
'parentCode': None,
'type': None,
'updateDate': None},
]
}
"""
brands_list_url = 'http://souche.com/pages/dicAction/loadRootLevel.json'
brand_url = 'http://souche.com/pages/dicAction/loadRootLevelForCar.json'
series_url = 'http://souche.com/pages/dicAction/loadNextLevel.json'
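# Rough usage sketch of the three endpoints above (the 'brand-170' and
# 'series-671' codes are only illustrative values copied from the sample JSON
# in the module docstring; `requests` is already imported at the top of this file):
#
#   brands = requests.get(brands_list_url, params={'type': 'car-subdivision'}).json()
#   series = requests.get(brand_url, params={'type': 'car-subdivision', 'code': 'brand-170'}).json()
#   models = requests.get(series_url, params={'type': 'car-subdivision', 'code': 'series-671'}).json()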
def db_client():
db = torndb.Connection(
host='127.0.0.1:3306',
database='play',
user='root',
password='',
time_zone='+8:00',
charset='utf8mb4',
)
return db
def get_brands():
"""
    Fetch the car brand codes from Dasouche (souche.com).
"""
brands_code = []
with open('souche_brand.html', 'r') as f:
for line in f.readlines():
brand_code = re.findall(r'data-code=(\S+)', line)
if brand_code:
brands_code.append(brand_code[0])
print(brands_code)
print('total: %s' % len(brands_code))
return brands_code
def get(url, code=None):
"""
    Generic GET request helper.
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36',
'Origin': 'http://souche.com',
'Referer': 'http://souche.com/pages/caruser/sell_car.html',
}
params = {
'type': 'car-subdivision',
'code': code,
}
resp = requests.get(url, params=params, headers=headers)
return json.dumps(resp.json())
def save_into_db(brand_code, data):
args = {
'brand_code': brand_code,
'data': data,
}
db_client().execute(*sql.insert('car_json', args))
print('------ brand_code: %s -------' % brand_code)
sleep(2)
if __name__ == '__main__':
rows = db_client().query('select * from car_series_url where data=%s', '')
print(rows)
for row in rows:
series_code = row.series_code
resp = get(series_url, series_code)
db_client().update('update car_series_url set data=%s where series_code=%s', resp, series_code)
print(row.series_code)
# rows = db_client().query('select series_code from car_series order by id desc limit %s', 728)
# count = 240
# codes=[]
# for row in rows:
# series_code = row.series_code
# codes.append(series_code)
    #     print('round %s: %s' % (count, series_code))
# count += 1
# codes.reverse()
#
# for c in codes:
# print(c)
# data = get(series_url, c)
# try:
# db_client().execute(*sql.insert('car_series_url', {'series_code':c, 'data': data}))
# except:
# db_client().execute(*sql.insert('car_series_url', {'series_code':c, 'data': ''}))
# sleep(0.6)
# for i in data:
#
# print(data)
# db_client().update('update car_json set brand_name=%s where brand_code=%s',)
# print(json.loads(get(brands_list_url)))
| gpl-2.0 | 3,052,965,385,087,952,400 | 29.527174 | 145 | 0.456115 | false |
anntzer/numpy | numpy/typing/tests/data/reveal/mod.py | 3 | 6991 | from typing import Any
import numpy as np
f8 = np.float64()
i8 = np.int64()
u8 = np.uint64()
f4 = np.float32()
i4 = np.int32()
u4 = np.uint32()
td = np.timedelta64(0, "D")
b_ = np.bool_()
b = bool()
f = float()
i = int()
AR_b: np.ndarray[Any, np.dtype[np.bool_]]
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
# Time structures
reveal_type(td % td) # E: numpy.timedelta64
reveal_type(AR_m % td) # E: Any
reveal_type(td % AR_m) # E: Any
reveal_type(divmod(td, td)) # E: Tuple[{int64}, numpy.timedelta64]
reveal_type(divmod(AR_m, td)) # E: Union[Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]]]
reveal_type(divmod(td, AR_m)) # E: Union[Tuple[numpy.signedinteger[numpy.typing._64Bit], numpy.timedelta64], Tuple[numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[numpy.typing._64Bit]]], numpy.ndarray[Any, numpy.dtype[numpy.timedelta64]]]]
# Bool
reveal_type(b_ % b) # E: {int8}
reveal_type(b_ % i) # E: {int_}
reveal_type(b_ % f) # E: {float64}
reveal_type(b_ % b_) # E: {int8}
reveal_type(b_ % i8) # E: {int64}
reveal_type(b_ % u8) # E: {uint64}
reveal_type(b_ % f8) # E: {float64}
reveal_type(b_ % AR_b) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]
reveal_type(divmod(b_, b)) # E: Tuple[{int8}, {int8}]
reveal_type(divmod(b_, i)) # E: Tuple[{int_}, {int_}]
reveal_type(divmod(b_, f)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}]
reveal_type(divmod(b_, i8)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(b_, u8)) # E: Tuple[{uint64}, {uint64}]
reveal_type(divmod(b_, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(b_, AR_b)) # E: Tuple[Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]], Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]]
reveal_type(b % b_) # E: {int8}
reveal_type(i % b_) # E: {int_}
reveal_type(f % b_) # E: {float64}
reveal_type(b_ % b_) # E: {int8}
reveal_type(i8 % b_) # E: {int64}
reveal_type(u8 % b_) # E: {uint64}
reveal_type(f8 % b_) # E: {float64}
reveal_type(AR_b % b_) # E: Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]
reveal_type(divmod(b, b_)) # E: Tuple[{int8}, {int8}]
reveal_type(divmod(i, b_)) # E: Tuple[{int_}, {int_}]
reveal_type(divmod(f, b_)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(b_, b_)) # E: Tuple[{int8}, {int8}]
reveal_type(divmod(i8, b_)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(u8, b_)) # E: Tuple[{uint64}, {uint64}]
reveal_type(divmod(f8, b_)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(AR_b, b_)) # E: Tuple[Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]], Union[{int8}, numpy.ndarray[Any, numpy.dtype[{int8}]]]]
# int
reveal_type(i8 % b) # E: {int64}
reveal_type(i8 % i) # E: {int64}
reveal_type(i8 % f) # E: {float64}
reveal_type(i8 % i8) # E: {int64}
reveal_type(i8 % f8) # E: {float64}
reveal_type(i4 % i8) # E: {int64}
reveal_type(i4 % f8) # E: {float64}
reveal_type(i4 % i4) # E: {int32}
reveal_type(i4 % f4) # E: {float32}
reveal_type(i8 % AR_b) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]
reveal_type(divmod(i8, b)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(i8, i)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(i8, f)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(i8, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i8, i4)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(i8, f4)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}]
reveal_type(divmod(i4, f4)) # E: Tuple[{float32}, {float32}]
reveal_type(divmod(i8, AR_b)) # E: Tuple[Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]], Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]]
reveal_type(b % i8) # E: {int64}
reveal_type(i % i8) # E: {int64}
reveal_type(f % i8) # E: {float64}
reveal_type(i8 % i8) # E: {int64}
reveal_type(f8 % i8) # E: {float64}
reveal_type(i8 % i4) # E: {int64}
reveal_type(f8 % i4) # E: {float64}
reveal_type(i4 % i4) # E: {int32}
reveal_type(f4 % i4) # E: {float32}
reveal_type(AR_b % i8) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]
reveal_type(divmod(b, i8)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(i, i8)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(f, i8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i8, i8)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(f8, i8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i4, i8)) # E: Tuple[{int64}, {int64}]
reveal_type(divmod(f4, i8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i4, i4)) # E: Tuple[{int32}, {int32}]
reveal_type(divmod(f4, i4)) # E: Tuple[{float32}, {float32}]
reveal_type(divmod(AR_b, i8)) # E: Tuple[Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]], Union[numpy.signedinteger[Any], numpy.ndarray[Any, numpy.dtype[numpy.signedinteger[Any]]]]]
# float
reveal_type(f8 % b) # E: {float64}
reveal_type(f8 % i) # E: {float64}
reveal_type(f8 % f) # E: {float64}
reveal_type(i8 % f4) # E: {float64}
reveal_type(f4 % f4) # E: {float32}
reveal_type(f8 % AR_b) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
reveal_type(divmod(f8, b)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f8, i)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f8, f)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f8, f4)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}]
reveal_type(divmod(f8, AR_b)) # E: Tuple[Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]]
reveal_type(b % f8) # E: {float64}
reveal_type(i % f8) # E: {float64}
reveal_type(f % f8) # E: {float64}
reveal_type(f8 % f8) # E: {float64}
reveal_type(f8 % f8) # E: {float64}
reveal_type(f4 % f4) # E: {float32}
reveal_type(AR_b % f8) # E: Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]
reveal_type(divmod(b, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(i, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f8, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f4, f8)) # E: Tuple[{float64}, {float64}]
reveal_type(divmod(f4, f4)) # E: Tuple[{float32}, {float32}]
reveal_type(divmod(AR_b, f8)) # E: Tuple[Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]], Union[numpy.floating[Any], numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]]]
| bsd-3-clause | -4,190,801,007,336,306,000 | 46.557823 | 243 | 0.643399 | false |
RealTimeWeb/wikisite | MoinMoin/util/diff_html.py | 1 | 5844 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - Side by side diffs
@copyright: 2002 Juergen Hermann <[email protected]>,
2002 Scott Moonen <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.support import difflib
from MoinMoin.wikiutil import escape
def indent(line):
eol = ''
while line and line[0] == '\n':
eol += '\n'
line = line[1:]
stripped = line.lstrip()
if len(line) - len(stripped):
line = " " * (len(line) - len(stripped)) + stripped
#return "%d / %d / %s" % (len(line), len(stripped), line)
return eol + line
# This code originally by Scott Moonen, used with permission.
def diff(request, old, new, old_top='', new_top='', old_bottom='', new_bottom='', old_top_class='', new_top_class='', old_bottom_class='', new_bottom_class=''):
""" Find changes between old and new and return
HTML markup visualising them.
@param old: old text [unicode]
@param new: new text [unicode]
@param old_top: Custom html for adding ontop of old revision column (optional)
@param old_bottom: Custom html for adding at bottom of old revision column (optional)
@param new_top: Custom html for adding ontop of new revision column (optional)
@param new_bottom: Custom html for adding at bottom of new revision column (optional)
@param old_top_class: Custom class for <td> with old_top content (optional)
@param new_top_class: Custom class for <td> with new_top content (optional)
@param old_bottom_class: Custom class for <td> with old_bottom content (optional)
@param new_bottom_class: Custom class for <td> with new_bottom content (optional)
"""
_ = request.getText
t_line = _("Line") + " %d"
seq1 = old.splitlines()
seq2 = new.splitlines()
seqobj = difflib.SequenceMatcher(None, seq1, seq2)
linematch = seqobj.get_matching_blocks()
result = """
<table class="diff">
"""
if old_top or new_top:
result += '<tr><td class="%s">%s</td><td class="%s">%s</td></tr>' % (old_top_class, old_top, new_top_class, new_top)
if len(seq1) == len(seq2) and linematch[0] == (0, 0, len(seq1)):
# No differences.
result += '<tr><td class="diff-same" colspan="2">' + _("No differences found!") + '</td></tr>'
else:
result += """
<tr>
<td class="diff-removed"><span>%s</span></td>
<td class="diff-added"><span>%s</span></td>
</tr>
""" % (_('Deletions are marked like this.'), _('Additions are marked like this.'), )
lastmatch = (0, 0)
# Print all differences
for match in linematch:
# Starts of pages identical?
if lastmatch == match[0:2]:
lastmatch = (match[0] + match[2], match[1] + match[2])
continue
llineno, rlineno = lastmatch[0]+1, lastmatch[1]+1
result += """
<tr class="diff-title">
<td>%s:</td>
<td>%s:</td>
</tr>
""" % (request.formatter.line_anchorlink(1, llineno) + request.formatter.text(t_line % llineno) + request.formatter.line_anchorlink(0),
request.formatter.line_anchorlink(1, rlineno) + request.formatter.text(t_line % rlineno) + request.formatter.line_anchorlink(0))
leftpane = ''
rightpane = ''
linecount = max(match[0] - lastmatch[0], match[1] - lastmatch[1])
for line in range(linecount):
if line < match[0] - lastmatch[0]:
if line > 0:
leftpane += '\n'
leftpane += seq1[lastmatch[0] + line]
if line < match[1] - lastmatch[1]:
if line > 0:
rightpane += '\n'
rightpane += seq2[lastmatch[1] + line]
charobj = difflib.SequenceMatcher(None, leftpane, rightpane)
charmatch = charobj.get_matching_blocks()
if charobj.ratio() < 0.5:
# Insufficient similarity.
if leftpane:
leftresult = """<span>%s</span>""" % indent(escape(leftpane))
else:
leftresult = ''
if rightpane:
rightresult = """<span>%s</span>""" % indent(escape(rightpane))
else:
rightresult = ''
else:
# Some similarities; markup changes.
charlast = (0, 0)
leftresult = ''
rightresult = ''
for thismatch in charmatch:
if thismatch[0] - charlast[0] != 0:
leftresult += """<span>%s</span>""" % indent(
escape(leftpane[charlast[0]:thismatch[0]]))
if thismatch[1] - charlast[1] != 0:
rightresult += """<span>%s</span>""" % indent(
escape(rightpane[charlast[1]:thismatch[1]]))
leftresult += escape(leftpane[thismatch[0]:thismatch[0] + thismatch[2]])
rightresult += escape(rightpane[thismatch[1]:thismatch[1] + thismatch[2]])
charlast = (thismatch[0] + thismatch[2], thismatch[1] + thismatch[2])
leftpane = '<br>'.join([indent(x) for x in leftresult.splitlines()])
rightpane = '<br>'.join([indent(x) for x in rightresult.splitlines()])
# removed width="50%%"
result += """
<tr>
<td class="diff-removed">%s</td>
<td class="diff-added">%s</td>
</tr>
""" % (leftpane, rightpane)
lastmatch = (match[0] + match[2], match[1] + match[2])
if old_bottom or new_bottom:
        result += '<tr><td class="%s">%s</td><td class="%s">%s</td></tr>' % (old_bottom_class, old_bottom, new_bottom_class, new_bottom)
result += '</table>\n'
return result
| apache-2.0 | 5,856,964,714,600,919,000 | 38.755102 | 160 | 0.542779 | false |
alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/EXT/texture_mirror_clamp.py | 9 | 1396 | '''OpenGL extension EXT.texture_mirror_clamp
This module customises the behaviour of the
OpenGL.raw.GL.EXT.texture_mirror_clamp to provide a more
Python-friendly API
Overview (from the spec)
EXT_texture_mirror_clamp extends the set of texture wrap modes to
include three modes (GL_MIRROR_CLAMP_EXT, GL_MIRROR_CLAMP_TO_EDGE_EXT,
GL_MIRROR_CLAMP_TO_BORDER_EXT) that effectively use a texture map
twice as large as the original image in which the additional half
of the new image is a mirror image of the original image.
This new mode relaxes the need to generate images whose opposite
edges match by using the original image to generate a matching
"mirror image". This mode allows the texture to be mirrored only
once in the negative s, t, and r directions.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/texture_mirror_clamp.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_mirror_clamp import *
from OpenGL.raw.GL.EXT.texture_mirror_clamp import _EXTENSION_NAME
def glInitTextureMirrorClampEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | mit | 6,614,285,079,264,739,000 | 37.805556 | 71 | 0.786533 | false |
windskyer/nova | nova/virt/vmwareapi/vif.py | 3 | 7803 | # Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for VMware."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import versionutils
from oslo_vmware import vim_util
from nova import exception
from nova.i18n import _, _LW
from nova.network import model
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
vmwareapi_vif_opts = [
cfg.StrOpt('vlan_interface',
default='vmnic0',
help='Physical ethernet adapter name for vlan networking'),
cfg.StrOpt('integration_bridge',
help='This option should be configured only when using the '
'NSX-MH Neutron plugin. This is the name of the '
'integration bridge on the ESXi. This should not be set '
'for any other Neutron plugin. Hence the default value '
'is not set.'),
]
CONF.register_opts(vmwareapi_vif_opts, 'vmware')
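# Illustrative nova.conf snippet for the options registered above (values are
# examples only; integration_bridge is set solely when the NSX-MH Neutron
# plugin is in use):
#
#   [vmware]
#   vlan_interface = vmnic1
#   integration_bridge = br-int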
def _get_associated_vswitch_for_interface(session, interface, cluster=None):
# Check if the physical network adapter exists on the host.
if not network_util.check_if_vlan_interface_exists(session,
interface, cluster):
raise exception.NetworkAdapterNotFound(adapter=interface)
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = network_util.get_vswitch_for_vlan_interface(
session, interface, cluster)
if not vswitch_associated:
raise exception.SwitchNotFoundForNetworkAdapter(adapter=interface)
return vswitch_associated
def ensure_vlan_bridge(session, vif, cluster=None, create_vlan=True):
"""Create a vlan and bridge unless they already exist."""
vlan_num = vif['network'].get_meta('vlan')
bridge = vif['network']['bridge']
vlan_interface = CONF.vmware.vlan_interface
network_ref = network_util.get_network_with_the_name(session, bridge,
cluster)
if network_ref and network_ref['type'] == 'DistributedVirtualPortgroup':
return network_ref
if not network_ref:
# Create a port group on the vSwitch associated with the
# vlan_interface corresponding physical network adapter on the ESX
# host.
vswitch_associated = _get_associated_vswitch_for_interface(session,
vlan_interface, cluster)
network_util.create_port_group(session, bridge,
vswitch_associated,
vlan_num if create_vlan else 0,
cluster)
network_ref = network_util.get_network_with_the_name(session,
bridge,
cluster)
elif create_vlan:
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = _get_associated_vswitch_for_interface(session,
vlan_interface, cluster)
# Get the vlan id and vswitch corresponding to the port group
_get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
pg_vlanid, pg_vswitch = _get_pg_info(session, bridge, cluster)
# Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
raise exception.InvalidVLANPortGroup(
bridge=bridge, expected=vswitch_associated,
actual=pg_vswitch)
# Check if the vlan id is proper for the port group
if pg_vlanid != vlan_num:
raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
pgroup=pg_vlanid)
return network_ref
def _check_ovs_supported_version(session):
# The port type 'ovs' is only support by the VC version 5.5 onwards
min_version = versionutils.convert_version_to_int(
constants.MIN_VC_OVS_VERSION)
vc_version = versionutils.convert_version_to_int(
vim_util.get_vc_version(session))
if vc_version < min_version:
LOG.warning(_LW('VMware vCenter version less than %(version)s '
'does not support the \'ovs\' port type.'),
{'version': constants.MIN_VC_OVS_VERSION})
def _get_neutron_network(session, cluster, vif):
if vif['type'] == model.VIF_TYPE_OVS:
_check_ovs_supported_version(session)
# Check if this is the NSX-MH plugin is used
if CONF.vmware.integration_bridge:
net_id = CONF.vmware.integration_bridge
use_external_id = False
network_type = 'opaque'
else:
net_id = vif['network']['id']
use_external_id = True
network_type = 'nsx.LogicalSwitch'
network_ref = {'type': 'OpaqueNetwork',
'network-id': net_id,
'network-type': network_type,
'use-external-id': use_external_id}
elif vif['type'] == model.VIF_TYPE_DVS:
network_id = vif['network']['bridge']
network_ref = network_util.get_network_with_the_name(
session, network_id, cluster)
if not network_ref:
raise exception.NetworkNotFoundForBridge(bridge=network_id)
else:
reason = _('vif type %s not supported') % vif['type']
raise exception.InvalidInput(reason=reason)
return network_ref
def get_network_ref(session, cluster, vif, is_neutron):
if is_neutron:
network_ref = _get_neutron_network(session, cluster, vif)
else:
create_vlan = vif['network'].get_meta('should_create_vlan', False)
network_ref = ensure_vlan_bridge(session, vif, cluster=cluster,
create_vlan=create_vlan)
return network_ref
def get_vif_dict(session, cluster, vif_model, is_neutron, vif):
mac = vif['address']
name = vif['network']['bridge'] or CONF.vmware.integration_bridge
ref = get_network_ref(session, cluster, vif, is_neutron)
return {'network_name': name,
'mac_address': mac,
'network_ref': ref,
'iface_id': vif['id'],
'vif_model': vif_model}
def get_vif_info(session, cluster, is_neutron, vif_model, network_info):
vif_infos = []
if network_info is None:
return vif_infos
for vif in network_info:
vif_infos.append(get_vif_dict(session, cluster, vif_model,
is_neutron, vif))
return vif_infos
def get_network_device(hardware_devices, mac_address):
"""Return the network device with MAC 'mac_address'."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if device.__class__.__name__ in vm_util.ALL_SUPPORTED_NETWORK_DEVICES:
if hasattr(device, 'macAddress'):
if device.macAddress == mac_address:
return device
| gpl-2.0 | -3,051,860,152,735,354,400 | 41.178378 | 78 | 0.614635 | false |
abtink/openthread | tests/toranj/test-041-lowpan-fragmentation.py | 9 | 3582 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: This test verifies the 6LoWPAN fragmentation code by exchanging IPv6 messages of
# many different lengths between two nodes.
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
node1 = wpan.Node()
node2 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
# Two-node network (node1 leader/router, node2 end-device)
node1.form('horizon') # "zero dawn"
node2.join_node(node1, node_type=wpan.JOIN_TYPE_END_DEVICE)
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
# Get the link local addresses
ll1 = node1.get(wpan.WPAN_IP6_LINK_LOCAL_ADDRESS)[1:-1]
ll2 = node2.get(wpan.WPAN_IP6_LINK_LOCAL_ADDRESS)[1:-1]
PORT = 1234
MSG_LEN_START = 100
MSG_LEN_END = 500
for msg_length in range(MSG_LEN_START, MSG_LEN_END):
sender = node1.prepare_tx((ll1, PORT), (ll2, PORT), msg_length)
recver = node2.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful)
verify(recver.was_successful)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| bsd-3-clause | -2,570,936,491,317,190,700 | 40.172414 | 121 | 0.591569 | false |
NeostreamTechnology/Microservices | venv/lib/python2.7/site-packages/functools32/reprlib32.py | 23 | 5126 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import __builtin__ as builtins
from itertools import islice
try:
from thread import get_ident
except ImportError:
from _dummy_thread32 import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
| mit | -6,125,561,229,947,608,000 | 31.649682 | 79 | 0.527117 | false |
MasterAlish/kg_bible | bible_parser/saver.py | 1 | 2522 | import sqlite3
class BibleSqliteSaver(object):
def __init__(self, bible, db_name):
self.bible = bible
self.conn = sqlite3.connect(db_name)
def save(self):
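        """Write the parsed bible (books, chapters, headers and verses) to SQLite."""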
self._init_tables()
for book in self.bible.books:
book_id = self.conn.execute("INSERT INTO 'books' (name) values ('%s')" % book.name).lastrowid
chapter_number = 0
for chapter in book.chapters:
chapter_number += 1
chapter_id = self.conn.execute("INSERT INTO 'chapters' (name, book_id, number) values ('%s', '%d', '%d')" %
(chapter.name, book_id, chapter_number)).lastrowid
header_number = 0
verse_number = 0
for header in chapter.headers:
header_number += 1
header_id = self.conn.execute("INSERT INTO 'headers' (name, chapter_id, number) values ('%s', '%d', '%d')" %
(header.name, chapter_id, header_number)).lastrowid
for verse in header.verses:
verse_number += 1
try:
self.conn.execute("INSERT INTO 'verses' (verse, header_id, chapter_id, number) values ('%s', '%d', '%d', '%d')" %
(verse.text.replace("'", "\""), header_id, chapter_id, verse_number))
except sqlite3.OperationalError as e:
print "Book %s chapter %d verse %s" %(book.name, chapter_id, verse.text)
raise e
self.conn.commit()
self.conn.close()
def _init_tables(self):
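        """Drop and recreate the books, chapters, headers and verses tables."""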
self.conn.execute("DROP TABLE IF EXISTS 'books'")
self.conn.execute("DROP TABLE IF EXISTS 'chapters'")
self.conn.execute("DROP TABLE IF EXISTS 'headers'")
self.conn.execute("DROP TABLE IF EXISTS 'verses'")
self.conn.execute('''CREATE TABLE books
(id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)''')
self.conn.execute(''' CREATE TABLE chapters
(id INTEGER PRIMARY KEY, number INTEGER, name TEXT, book_id INTEGER)''')
self.conn.execute(''' CREATE TABLE headers
            (id INTEGER PRIMARY KEY AUTOINCREMENT, number INTEGER, name TEXT, chapter_id INTEGER)''')
self.conn.execute(''' CREATE TABLE verses
(id INTEGER PRIMARY KEY, number INTEGER, verse TEXT, header_id INTEGER, chapter_id INTEGER)''') | gpl-2.0 | -4,249,515,913,284,056,000 | 50.489796 | 141 | 0.530928 | false |
ITOO-UrFU/open-programs | open_programs/apps/disciplines/migrations/0001_initial.py | 1 | 4047 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-02 09:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Discipline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('archived', models.BooleanField(default=False, verbose_name='В архиве')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлен')),
('status', models.CharField(choices=[('h', 'Скрыт'), ('p', 'Опубликован')], default='h', max_length=1, verbose_name='Статус публикации')),
('title', models.CharField(default='', max_length=256, verbose_name='Название дисциплины')),
('description', models.TextField(blank=True, max_length=16384, null=True, verbose_name='Короткое описание')),
('labor', models.PositiveIntegerField(default=0, verbose_name='зачётных единиц')),
('period', models.IntegerField(default=1, verbose_name='Период освоения в модуле')),
('form', models.CharField(choices=[('e', 'Экзамен'), ('z', 'Зачет')], default='z', max_length=1, verbose_name='Форма контроля')),
('results_text', models.TextField(blank=True, default='', max_length=16384, verbose_name='Результаты обучения')),
('uni_uid', models.CharField(blank=True, max_length=256, null=True)),
('uni_discipline', models.CharField(blank=True, max_length=256, null=True)),
('uni_number', models.CharField(blank=True, max_length=256, null=True)),
('uni_section', models.CharField(blank=True, max_length=256, null=True)),
('uni_file', models.CharField(blank=True, max_length=256, null=True)),
],
options={
'verbose_name': 'дисциплина',
'verbose_name_plural': 'дисциплины',
},
),
migrations.CreateModel(
name='Semester',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('year', models.PositiveIntegerField(default=1970, verbose_name='Год поступления')),
('admission_semester', models.PositiveIntegerField(default=0, verbose_name='Семестр поступления')),
('training_semester', models.PositiveIntegerField(default=0, verbose_name='Семестр изучения')),
('discipline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='disciplines.Discipline')),
],
options={
'verbose_name': 'семестр изучения дисциплины',
'verbose_name_plural': 'семестры изучения дисциплины',
},
),
migrations.CreateModel(
name='TrainingTerms',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(default='', max_length=256, verbose_name='Наименование срока обучения')),
('limit', models.PositiveIntegerField(default=0, verbose_name='Лимит ЗЕ в год')),
],
options={
'verbose_name': 'срок обучения',
'verbose_name_plural': 'сроки обучения',
},
),
]
| unlicense | 2,942,173,603,907,159,600 | 52.57971 | 154 | 0.589938 | false |
nathanhilbert/cubes | cubes/server/blueprint.py | 6 | 19779 | # -*- coding: utf-8 -*-
from flask import Blueprint, Response, request, g, current_app, url_for, safe_join, make_response
from flask import render_template, redirect
from jinja2 import Template
import json, re
from functools import wraps
import sys
import traceback
from ..workspace import Workspace, SLICER_INFO_KEYS
from ..cells import Cell, cut_from_dict
from ..browser import SPLIT_DIMENSION_NAME
from ..errors import *
from ..formatters import JSONLinesGenerator, csv_generator
from .. import ext
from .logging import configured_request_log_handlers, RequestLogger
from .logging import AsyncRequestLogger
from .utils import *
from .errors import *
from .decorators import *
from .local import *
from .auth import NotAuthenticated
from collections import OrderedDict
from cubes import __version__
# TODO: missing features from the original Werkzeug Slicer:
# * /locales and localization
# * default cube: /aggregate
# * caching
# * root / index
# * response.headers.add("Access-Control-Allow-Origin", "*")
try:
import cubes_search
except ImportError:
cubes_search = None
__all__ = (
"slicer",
"API_VERSION"
)
API_VERSION = 2
# Cross-origin resource sharing – 20 days cache
CORS_MAX_AGE = 1728000
slicer = Blueprint("slicer", __name__, template_folder="templates", static_folder="visualizer")
# Before
# ------
def _store_option(config, option, default, type_=None, allowed=None,
section="server"):
"""Copies the `option` into the application config dictionary. `default`
is a default value, if there is no such option in `config`. `type_` can be
`bool`, `int` or `string` (default). If `allowed` is specified, then the
option should be only from the list of allowed options, otherwise a
`ConfigurationError` exception is raised.
"""
if config.has_option(section, option):
if type_ == "bool":
value = config.getboolean(section, option)
elif type_ == "int":
value = config.getint(section, option)
else:
value = config.get(section, option)
else:
value = default
if allowed and value not in allowed:
raise ConfigurationError("Invalued value '%s' for option '%s'"
% (value, option))
setattr(current_app.slicer, option, value)
@slicer.record_once
def initialize_slicer(state):
"""Create the workspace and configure the application context from the
``slicer.ini`` configuration."""
with state.app.app_context():
config = state.options["config"]
config = read_server_config(config)
# Create workspace and other app objects
# We avoid pollution of the current_app context, as we are a Blueprint
params = CustomDict()
current_app.slicer = params
current_app.slicer.config = config
# FIXME: this is a workaround, see Workspace note about _options
if "cubes_root" in state.options:
_options = {"cubes_root": state.options["cubes_root"]}
else:
_options = {}
if not hasattr(current_app, 'cubes_workspace'):
current_app.cubes_workspace = Workspace(config, **_options)
# Configure the application
_store_option(config, "prettyprint", False, "bool")
_store_option(config, "json_record_limit", 1000, "int")
_store_option(config, "hide_private_cuts", False, "bool")
_store_option(config, "allow_cors_origin", None, "str")
_store_option(config, "visualizer", None, "str")
_store_option(config, "authentication", "none")
method = current_app.slicer.authentication
if method is None or method == "none":
current_app.slicer.authenticator = None
else:
if config.has_section("authentication"):
options = dict(config.items("authentication"))
else:
options = {}
current_app.slicer.authenticator = ext.authenticator(method,
**options)
logger.debug("Server authentication method: %s" % (method or "none"))
if not current_app.slicer.authenticator and workspace.authorizer:
logger.warn("No authenticator specified, but workspace seems to "
"be using an authorizer")
# Collect query loggers
handlers = configured_request_log_handlers(config)
if config.has_option('server', 'asynchronous_logging'):
async_logging = config.getboolean("server", "asynchronous_logging")
else:
async_logging = False
if async_logging:
current_app.slicer.request_logger = AsyncRequestLogger(handlers)
else:
current_app.slicer.request_logger = RequestLogger(handlers)
# Before and After
# ================
@slicer.before_request
def process_common_parameters():
# TODO: setup language
# Copy from the application context
g.json_record_limit = current_app.slicer.json_record_limit
if "prettyprint" in request.args:
g.prettyprint = str_to_bool(request.args.get("prettyprint"))
else:
g.prettyprint = current_app.slicer.prettyprint
@slicer.before_request
def prepare_authorization():
if current_app.slicer.authenticator:
try:
identity = current_app.slicer.authenticator.authenticate(request)
except NotAuthenticated as e:
raise NotAuthenticatedError
else:
identity = None
# Authorization
# -------------
g.auth_identity = identity
# Error Handler
# =============
@slicer.errorhandler(UserError)
def user_error_handler(e):
error_type = e.__class__.error_type
error = OrderedDict()
error["error"] = error_type
error["message"] = str(e)
if hasattr(e, "hint") and e.hint:
error["hint"] = e.hint
if hasattr(e, "to_dict"):
error.update(e.to_dict())
code = server_error_codes.get(error_type, 400)
return jsonify(error), code
@slicer.errorhandler(404)
def page_not_found(e):
error = {
"error": "not_found",
"message": "The requested URL was not found on the server.",
"hint": "If you entered the URL manually please check your "
"spelling and try again."
}
return jsonify(error), 404
@slicer.errorhandler(InternalError)
def server_error(e):
(exc_type, exc_value, exc_traceback) = sys.exc_info()
exc_name = exc_type.__name__
logger.error("Internal Cubes error ({}): {}".format(exc_name, exc_value))
tb = traceback.format_exception(exc_type, exc_value,
exc_traceback)
logger.debug("Exception stack trace:\n{}".format("".join(tb)))
error = {
"error": "internal_server_error",
"message": "Internal server error",
"hint": "Server administrators can learn more about the error from "
"the error logs (even more if they have 'debug' level)"
}
return jsonify(error), 500
# Endpoints
# =========
@slicer.route("/")
def show_index():
info = get_info()
has_about = any(key in info for key in SLICER_INFO_KEYS)
return render_template("index.html",
has_about=has_about,
**info)
@slicer.route("/version")
def show_version():
info = {
"version": __version__,
# Backward compatibility key
"server_version": __version__,
"api_version": API_VERSION
}
return jsonify(info)
def get_info():
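    """Assemble server metadata (workspace info, limits, version and authentication)."""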
if workspace.info:
info = OrderedDict(workspace.info)
else:
info = OrderedDict()
info["json_record_limit"] = current_app.slicer.json_record_limit
info["cubes_version"] = __version__
info["timezone"] = workspace.calendar.timezone_name
info["first_weekday"] = workspace.calendar.first_weekday
info["api_version"] = API_VERSION
# authentication
authinfo = {}
authinfo["type"] = (current_app.slicer.authentication or "none")
if g.auth_identity:
authinfo['identity'] = g.auth_identity
if current_app.slicer.authenticator:
ainfo = current_app.slicer.authenticator.info_dict(request)
authinfo.update(ainfo)
info['authentication'] = authinfo
return info
@slicer.route("/info")
def show_info():
return jsonify(get_info())
@slicer.route("/cubes")
def list_cubes():
cube_list = workspace.list_cubes(g.auth_identity)
# TODO: cache per-identity
return jsonify(cube_list)
@slicer.route("/cube/<cube_name>/model")
@requires_cube
def cube_model(cube_name):
if workspace.authorizer:
hier_limits = workspace.authorizer.hierarchy_limits(g.auth_identity,
cube_name)
else:
hier_limits = None
response = g.cube.to_dict(expand_dimensions=True,
with_mappings=False,
full_attribute_names=True,
create_label=True,
hierarchy_limits=hier_limits)
response["features"] = workspace.cube_features(g.cube)
return jsonify(response)
@slicer.route("/cube/<cube_name>/aggregate")
@requires_browser
@log_request("aggregate", "aggregates")
def aggregate(cube_name):
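    """Aggregate the cube over the requested cell and return the result as JSON or CSV."""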
cube = g.cube
output_format = validated_parameter(request.args, "format",
values=["json", "csv"],
default="json")
header_type = validated_parameter(request.args, "header",
values=["names", "labels", "none"],
default="labels")
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.lower().split(',')
else:
fields = None
# Aggregates
# ----------
aggregates = []
for agg in request.args.getlist("aggregates") or []:
aggregates += agg.split("|")
drilldown = []
ddlist = request.args.getlist("drilldown")
if ddlist:
for ddstring in ddlist:
drilldown += ddstring.split("|")
prepare_cell("split", "split")
result = g.browser.aggregate(g.cell,
aggregates=aggregates,
drilldown=drilldown,
split=g.split,
page=g.page,
page_size=g.page_size,
order=g.order)
# Hide cuts that were generated internally (default: don't)
if current_app.slicer.hide_private_cuts:
result.cell = result.cell.public_cell()
if output_format == "json":
return jsonify(result)
elif output_format != "csv":
raise RequestError("unknown response format '%s'" % output_format)
# csv
if header_type == "names":
header = result.labels
elif header_type == "labels":
header = []
for l in result.labels:
# TODO: add a little bit of polish to this
if l == SPLIT_DIMENSION_NAME:
header.append('Matches Filters')
else:
header += [ attr.label or attr.name for attr in cube.get_attributes([l], aggregated=True) ]
else:
header = None
fields = result.labels
generator = csv_generator(result,
fields,
include_header=bool(header),
header=header)
headers = {"Content-Disposition": 'attachment; filename="aggregate.csv"'}
return Response(generator,
mimetype='text/csv',
headers=headers)
@slicer.route("/cube/<cube_name>/facts")
@requires_browser
@log_request("facts", "fields")
def cube_facts(cube_name):
# Request parameters
fields_str = request.args.get("fields")
if fields_str:
fields = fields_str.split(',')
else:
fields = None
# fields contain attribute names
if fields:
attributes = g.cube.get_attributes(fields)
else:
attributes = g.cube.all_fact_attributes
# Construct the field list
fields = [attr.ref for attr in attributes]
# Get the result
facts = g.browser.facts(g.cell,
fields=fields,
order=g.order,
page=g.page,
page_size=g.page_size)
# Add cube key to the fields (it is returned in the result)
fields.insert(0, g.cube.key or "id")
# Construct the header
labels = [attr.label or attr.name for attr in attributes]
labels.insert(0, g.cube.key or "id")
return formatted_response(facts, fields, labels)
@slicer.route("/cube/<cube_name>/fact/<fact_id>")
@requires_browser
def cube_fact(cube_name, fact_id):
fact = g.browser.fact(fact_id)
if fact:
return jsonify(fact)
else:
raise NotFoundError(fact_id, "fact",
message="No fact with id '%s'" % fact_id)
@slicer.route("/cube/<cube_name>/members/<dimension_name>")
@requires_browser
@log_request("members")
def cube_members(cube_name, dimension_name):
# TODO: accept level name
depth = request.args.get("depth")
level = request.args.get("level")
if depth and level:
raise RequestError("Both depth and level provided, use only one "
"(preferably level)")
if depth:
try:
depth = int(depth)
except ValueError:
raise RequestError("depth should be an integer")
try:
dimension = g.cube.dimension(dimension_name)
except KeyError:
raise NotFoundError(dimension_name, "dimension",
message="Dimension '%s' was not found" % dimension_name)
hier_name = request.args.get("hierarchy")
hierarchy = dimension.hierarchy(hier_name)
if not depth and not level:
depth = len(hierarchy)
elif level:
depth = hierarchy.level_index(level) + 1
values = g.browser.members(g.cell,
dimension,
depth=depth,
hierarchy=hierarchy,
page=g.page,
page_size=g.page_size)
result = {
"dimension": dimension.name,
"hierarchy": hierarchy.name,
"depth": len(hierarchy) if depth is None else depth,
"data": values
}
# Collect fields and labels
attributes = []
for level in hierarchy.levels_for_depth(depth):
attributes += level.attributes
fields = [attr.ref for attr in attributes]
labels = [attr.label or attr.name for attr in attributes]
return formatted_response(result, fields, labels, iterable=values)
@slicer.route("/cube/<cube_name>/cell")
@requires_browser
def cube_cell(cube_name):
details = g.browser.cell_details(g.cell)
if not g.cell:
g.cell = Cell(g.cube)
cell_dict = g.cell.to_dict()
for cut, detail in zip(cell_dict["cuts"], details):
cut["details"] = detail
return jsonify(cell_dict)
@slicer.route("/cube/<cube_name>/report", methods=["GET", "POST"])
@requires_browser
def cube_report(cube_name):
report_request = json.loads(request.data)
try:
queries = report_request["queries"]
except KeyError:
raise RequestError("Report request does not contain 'queries' key")
cell_cuts = report_request.get("cell")
if cell_cuts:
# Override URL cut with the one in report
cuts = [cut_from_dict(cut) for cut in cell_cuts]
cell = Cell(g.cube, cuts)
logger.info("using cell from report specification (URL parameters "
"are ignored)")
if workspace.authorizer:
cell = workspace.authorizer.restricted_cell(g.auth_identity,
cube=g.cube,
cell=cell)
else:
if not g.cell:
cell = Cell(g.cube)
else:
cell = g.cell
result = g.browser.report(cell, queries)
return jsonify(result)
@slicer.route("/cube/<cube_name>/search")
def cube_search(cube_name):
# TODO: this is ported from old Werkzeug slicer, requires revision
config = current_app.config
if config.has_section("search"):
options = dict(config.items("search"))
engine_name = options.pop("engine")
else:
raise ConfigurationError("Search engine is not configured.")
logger.debug("using search engine: %s" % engine_name)
search_engine = cubes_search.create_searcher(engine_name,
browser=g.browser,
locales=g.locales,
**options)
dimension = request.args.get("dimension")
if not dimension:
raise RequestError("No search dimension provided")
query = request.args.get("query")
if not query:
raise RequestError("No search query provided")
locale = g.locale or g.locales[0]
logger.debug("searching for '%s' in %s, locale %s"
% (query, dimension, locale))
search_result = search_engine.search(query, dimension, locale=locale)
result = {
"matches": search_result.dimension_matches(dimension),
"dimension": dimension,
"total_found": search_result.total_found,
"locale": locale
}
if search_result.error:
result["error"] = search_result.error
if search_result.warning:
result["warning"] = search_result.warning
return jsonify(result)
@slicer.route("/logout")
def logout():
if current_app.slicer.authenticator:
return current_app.slicer.authenticator.logout(request, g.auth_identity)
else:
return "logged out"
_VIS_CONFIG_PATTERN = re.compile(r"<!--\s*VISUALIZER CONFIG.+?-->(?msu)")
_VIS_CONFIG_SCRIPT_TEMPLATE = Template(u"""
<script type="text/javascript">
VisualizerConfig.cubesUrl = "{{serverUrl}}";
VisualizerConfig.splashScreen = false;
</script>
""")
@slicer.route("/visualizer/")
@slicer.route("/visualizer/index.html")
def get_visualizer():
viz = current_app.slicer.visualizer
# Use the default visualizer
if viz == "default":
with open(safe_join(slicer.static_folder, 'index.html'), 'rb') as indexfd:
index_contents = indexfd.read().decode('utf8')
index_contents = _VIS_CONFIG_PATTERN.sub(_VIS_CONFIG_SCRIPT_TEMPLATE.render(serverUrl=url_for('.show_index', _external=True)), index_contents)
return make_response(index_contents)
elif viz:
return redirect(viz)
else:
raise PageNotFoundError("Visualizer not configured")
@slicer.after_request
def add_cors_headers(response):
"""Add Cross-origin resource sharing headers."""
origin = current_app.slicer.allow_cors_origin
if origin and len(origin):
if request.method == 'OPTIONS':
response.headers['Access-Control-Allow-Headers'] = 'X-Requested-With'
# OPTIONS preflight requests need to receive origin back instead of wildcard
if origin == '*':
response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', origin)
else:
response.headers['Access-Control-Allow-Origin'] = origin
response.headers['Access-Control-Allow-Credentials'] = 'true'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
response.headers['Access-Control-Max-Age'] = CORS_MAX_AGE
return response
| mit | -4,615,425,646,112,495,000 | 29.949922 | 150 | 0.598372 | false |
popazerty/enigma2cuberevo | lib/python/Plugins/Extensions/MediaScanner/plugin.py | 6 | 3157 | from Plugins.Plugin import PluginDescriptor
from Components.Scanner import scanDevice
from Screens.InfoBar import InfoBar
from os import access, F_OK, R_OK
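# ChoiceBox callback: open the selected scanner on the files it matched.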
def execute(option):
print "execute", option
if option is None:
return
(_, scanner, files, session) = option
scanner.open(files, session)
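# ChoiceBox callback: scan the chosen mountpoint and offer any displayable files found.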
def mountpoint_choosen(option):
if option is None:
return
from Screens.ChoiceBox import ChoiceBox
print "scanning", option
(description, mountpoint, session) = option
res = scanDevice(mountpoint)
list = [ (r.description, r, res[r], session) for r in res ]
if not list:
from Screens.MessageBox import MessageBox
if access(mountpoint, F_OK|R_OK):
session.open(MessageBox, _("No displayable files on this medium found!"), MessageBox.TYPE_ERROR)
else:
print "ignore", mountpoint, "because its not accessible"
return
session.openWithCallback(execute, ChoiceBox,
title = _("The following files were found..."),
list = list)
def scan(session):
from Screens.ChoiceBox import ChoiceBox
from Components.Harddisk import harddiskmanager
parts = [ (r.description, r.mountpoint, session) for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)]
if parts:
		# iterate over a copy so that removing entries does not skip items
		for x in parts[:]:
if not access(x[1], F_OK|R_OK):
parts.remove(x)
session.openWithCallback(mountpoint_choosen, ChoiceBox, title = _("Please Select Medium to be Scanned"), list = parts)
def main(session, **kwargs):
scan(session)
def menuEntry(*args):
mountpoint_choosen(args)
from Components.Harddisk import harddiskmanager
def menuHook(menuid):
if menuid != "mainmenu":
return [ ]
from Tools.BoundFunction import boundFunction
return [(("%s (files)") % r.description, boundFunction(menuEntry, r.description, r.mountpoint), "hotplug_%s" % r.mountpoint, None) for r in harddiskmanager.getMountedPartitions(onlyhotplug = True)]
global_session = None
def partitionListChanged(action, device):
if InfoBar.instance:
if InfoBar.instance.execing:
if action == 'add' and device.is_hotplug:
print "mountpoint", device.mountpoint
print "description", device.description
print "force_mounted", device.force_mounted
mountpoint_choosen((device.description, device.mountpoint, global_session))
else:
print "main infobar is not execing... so we ignore hotplug event!"
else:
print "hotplug event.. but no infobar"
def sessionstart(reason, session):
global global_session
global_session = session
def autostart(reason, **kwargs):
global global_session
if reason == 0:
harddiskmanager.on_partition_list_change.append(partitionListChanged)
elif reason == 1:
harddiskmanager.on_partition_list_change.remove(partitionListChanged)
global_session = None
def Plugins(**kwargs):
return [
PluginDescriptor(name="MediaScanner", description=_("Scan Files..."), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = True, fnc=main),
# PluginDescriptor(where = PluginDescriptor.WHERE_MENU, fnc=menuHook),
PluginDescriptor(where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = True, fnc = sessionstart),
PluginDescriptor(where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = True, fnc = autostart)
]
| gpl-2.0 | 1,347,949,624,936,353,800 | 31.214286 | 198 | 0.748812 | false |
hep7agon/city-feedback-hub | api/views.py | 1 | 7869 | import operator
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
from rest_framework import status
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.views import APIView
from api.analysis import *
from api.models import Service, MediaFile
from api.services import get_feedbacks, attach_files_to_feedback, save_file_to_db
from .serializers import FeedbackSerializer, ServiceSerializer, FeedbackDetailSerializer
class RequestBaseAPIView(APIView):
item_tag_name = 'request'
root_tag_name = 'requests'
class FeedbackList(RequestBaseAPIView):
def get(self, request, format=None):
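        """List feedbacks filtered by the query parameters of the request."""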
service_object_id = request.query_params.get('service_object_id', None)
service_object_type = request.query_params.get('service_object_type', None)
if service_object_id is not None and service_object_type is None:
raise ValidationError(
"If service_object_id is included in the request, then service_object_type must be included.")
queryset = get_feedbacks(
service_request_ids=request.query_params.get('service_request_id', None),
service_codes=request.query_params.get('service_code', None),
start_date=request.query_params.get('start_date', None),
end_date=request.query_params.get('end_date', None),
statuses=request.query_params.get('status', None),
service_object_type=service_object_type,
service_object_id=service_object_id,
lat=request.query_params.get('lat', None),
lon=request.query_params.get('long', None),
radius=request.query_params.get('radius', None),
updated_after=request.query_params.get('updated_after', None),
updated_before=request.query_params.get('updated_before', None),
search=request.query_params.get('search', None),
agency_responsible=request.query_params.get('agency_responsible', None),
order_by=request.query_params.get('order_by', None),
use_limit=True
)
serializer = FeedbackSerializer(queryset, many=True,
context={'extensions': request.query_params.get('extensions', 'false')})
return Response(serializer.data)
def post(self, request, format=None):
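        """Create a new feedback item and attach any uploaded files to it."""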
# TODO: (for future releases) add API key rules
serializer = FeedbackDetailSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
new_feedback = serializer.save()
# save files in the same manner as it's done in feedback form
if request.FILES:
for filename, file in request.FILES.items():
save_file_to_db(file, new_feedback.service_request_id)
files = MediaFile.objects.filter(form_id=new_feedback.service_request_id)
if files:
attach_files_to_feedback(request, new_feedback, files)
response_data = {
'service_request_id': new_feedback.service_request_id,
'service_notice': ''
}
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class FeedbackDetail(RequestBaseAPIView):
def get(self, request, service_request_id, format=None):
queryset = get_feedbacks(
service_request_ids=service_request_id,
service_codes=request.query_params.get('service_code', None),
start_date=request.query_params.get('start_date', None),
end_date=request.query_params.get('end_date', None),
statuses=request.query_params.get('status', None),
lat=request.query_params.get('lat', None),
lon=request.query_params.get('long', None),
radius=request.query_params.get('radius', None),
updated_after=request.query_params.get('updated_after', None),
updated_before=request.query_params.get('updated_before', None),
search=request.query_params.get('search', None),
order_by=request.query_params.get('order_by', None)
)
serializer = FeedbackSerializer(queryset, many=True,
context={'extensions': request.query_params.get('extensions', 'false')})
return Response(serializer.data)
class ServiceList(APIView):
item_tag_name = 'service'
root_tag_name = 'services'
def get(self, request, format=None):
queryset = Service.objects.all()
serializer = ServiceSerializer(queryset, many=True)
return Response(serializer.data)
def get_service_statistics(request, service_code):
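    """Return JSON statistics for one service, or a 404 error for an unknown service code."""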
try:
service = Service.objects.get(service_code=service_code)
except ObjectDoesNotExist:
return JsonResponse({'status': 'error', 'message': 'unknown service code'},
status=status.HTTP_404_NOT_FOUND)
statistics = get_service_item_statistics(service)
return JsonResponse(statistics)
def get_services_statistics(request):
service_statistics = []
for service in Service.objects.all():
item = get_service_item_statistics(service)
service_statistics.append(item)
# Sort the rows by "total" column
service_statistics.sort(key=operator.itemgetter('total'), reverse=True)
return JsonResponse(service_statistics, safe=False)
def get_service_item_statistics(service):
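    """Build the per-service statistics row: totals, closed count and closing-time stats."""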
item = {}
service_code = service.service_code
avg = get_avg_duration(get_closed_by_service_code(service_code))
median = get_median_duration(get_closed_by_service_code(service_code))
item["service_code"] = service.service_code
item["service_name"] = service.service_name
item["total"] = get_total_by_service(service_code)
item["closed"] = get_closed_by_service(service_code)
item["avg_sec"] = int(avg.total_seconds())
item["median_sec"] = int(median.total_seconds())
return item
def get_agency_item_statistics(agency_responsible):
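    """Build the per-agency statistics row: totals, closed count and closing-time stats."""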
item = {}
avg = get_avg_duration(get_closed_by_agency_responsible(agency_responsible))
median = get_median_duration(get_closed_by_agency_responsible(agency_responsible))
item["agency_responsible"] = agency_responsible
item["total"] = get_total_by_agency(agency_responsible)
item["closed"] = get_closed_by_agency(agency_responsible)
item["avg_sec"] = int(avg.total_seconds())
item["median_sec"] = int(median.total_seconds())
return item
def get_agency_statistics(request, agency):
feedbacks_with_agency = Feedback.objects.filter(agency_responsible__iexact=agency).count()
if feedbacks_with_agency == 0:
return JsonResponse({'status': 'error', 'message': 'unknown agency name'},
status=status.HTTP_404_NOT_FOUND)
statistics = get_agency_item_statistics(agency)
return JsonResponse(statistics)
def get_agencies_statistics(request):
agency_statistics = []
agencies = Feedback.objects.all().distinct("agency_responsible")
for agency in agencies:
item = get_agency_item_statistics(agency.agency_responsible)
agency_statistics.append(item)
# Sort the rows by "total" column
agency_statistics.sort(key=operator.itemgetter('total'), reverse=True)
return JsonResponse(agency_statistics, safe=False)
def get_agency_responsible_list(request):
feedbacks = Feedback.objects.all().distinct("agency_responsible").order_by('agency_responsible')
agencies = [f.agency_responsible for f in feedbacks]
return JsonResponse(agencies, safe=False)
| mit | -8,328,444,592,867,085,000 | 40.634921 | 114 | 0.657898 | false |
ctoher/pymatgen | pymatgen/io/tests/test_aseio.py | 3 | 1564 | # coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Mar 8, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 8, 2012"
import unittest
import os
from nose.exc import SkipTest
from pymatgen import Composition
from pymatgen.io.vaspio.vasp_input import Poscar
import pymatgen.io.aseio as aio
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class AseAtomsAdaptorTest(unittest.TestCase):
def test_get_atoms(self):
if not aio.ase_loaded:
raise SkipTest("ASE not present. Skipping...")
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
structure = p.structure
atoms = aio.AseAtomsAdaptor.get_atoms(structure)
ase_composition = Composition(atoms.get_name())
self.assertEqual(ase_composition, structure.composition)
def test_get_structure(self):
if not aio.ase_loaded:
raise SkipTest("ASE not present. Skipping...")
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
atoms = aio.AseAtomsAdaptor.get_atoms(p.structure)
self.assertEqual(aio.AseAtomsAdaptor.get_structure(atoms).formula,
"Fe4 P4 O16")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
if aio.ase_loaded:
unittest.main()
else:
print("ASE not loaded. Skipping tests")
| mit | -263,003,649,504,093,440 | 27.436364 | 74 | 0.627877 | false |
kevin-coder/tensorflow-fork | tensorflow/contrib/eager/python/tfe.py | 4 | 5764 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Eager execution prototype.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
To use, at program startup, call `tf.enable_eager_execution()`.
@@metrics
@@list_devices
@@num_gpus
@@py_func
@@defun
@@function
@@make_template
@@implicit_gradients
@@implicit_value_and_gradients
@@gradients_function
@@value_and_gradients_function
@@GradientTape
@@run
@@enable_eager_execution
@@enable_remote_eager_execution
@@custom_gradient
@@add_execution_callback
@@clear_execution_callbacks
@@errstate
@@ExecutionCallback
@@inf_callback
@@inf_nan_callback
@@nan_callback
@@seterr
@@Iterator
@@Saver
@@restore_variables_on_create
@@Variable
@@get_optimizer_variables
@@EagerVariableStore
@@Network
@@Sequential
@@save_network_checkpoint
@@restore_network_checkpoint
@@Checkpoint
@@Checkpointable
@@executing_eagerly
@@in_eager_mode
@@set_execution_mode
@@execution_mode
@@async_wait
@@async_clear_error
@@set_server_def
@@run_test_in_graph_and_eager_modes
@@run_all_tests_in_graph_and_eager_modes
@@TensorSpec
@@connect_to_remote_host
@@DEVICE_PLACEMENT_EXPLICIT
@@DEVICE_PLACEMENT_WARN
@@DEVICE_PLACEMENT_SILENT
@@SYNC
@@ASYNC
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-bad-import-order,g-import-not-at-top,unused-import
#
from tensorflow.contrib.eager.python import metrics
from tensorflow.contrib.eager.python.datasets import Iterator
from tensorflow.contrib.eager.python.network import Network
from tensorflow.contrib.eager.python.network import Sequential
from tensorflow.contrib.eager.python.network import save_network_checkpoint
from tensorflow.contrib.eager.python.network import restore_network_checkpoint
from tensorflow.contrib.eager.python.saver import get_optimizer_variables
from tensorflow.contrib.eager.python.saver import restore_variables_on_create
from tensorflow.contrib.eager.python.saver import Saver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import function as _function_lib
from tensorflow.python.eager.context import DEVICE_PLACEMENT_EXPLICIT
from tensorflow.python.eager.context import DEVICE_PLACEMENT_WARN
from tensorflow.python.eager.context import DEVICE_PLACEMENT_SILENT
from tensorflow.python.eager.context import executing_eagerly
from tensorflow.python.eager.context import list_devices
from tensorflow.python.eager.context import set_execution_mode
from tensorflow.python.eager.context import execution_mode
from tensorflow.python.eager.context import async_wait
from tensorflow.python.eager.context import async_clear_error
from tensorflow.python.eager.context import SYNC
from tensorflow.python.eager.context import ASYNC
from tensorflow.python.eager.context import num_gpus
from tensorflow.python.eager.context import set_server_def
from tensorflow.python.eager.def_function import function
from tensorflow.python.eager.execution_callbacks import add_execution_callback
from tensorflow.python.eager.execution_callbacks import clear_execution_callbacks
from tensorflow.python.eager.execution_callbacks import errstate
from tensorflow.python.eager.execution_callbacks import ExecutionCallback
from tensorflow.python.eager.execution_callbacks import inf_callback
from tensorflow.python.eager.execution_callbacks import inf_nan_callback
from tensorflow.python.eager.execution_callbacks import nan_callback
from tensorflow.python.eager.execution_callbacks import seterr
from tensorflow.python.eager.remote import connect_to_remote_host
from tensorflow.python.framework.tensor_spec import TensorSpec
from tensorflow.python.framework.ops import enable_eager_execution
from tensorflow.python.framework.ops import enable_eager_execution_internal as enable_remote_eager_execution
from tensorflow.python.framework.ops import eager_run as run
from tensorflow.python.framework.test_util import run_in_graph_and_eager_modes as run_test_in_graph_and_eager_modes
from tensorflow.python.framework.test_util import run_all_in_graph_and_eager_modes as run_all_tests_in_graph_and_eager_modes
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
from tensorflow.python.ops.variable_scope import EagerVariableStore
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import template
from tensorflow.python.training.tracking.tracking import AutoTrackable as Checkpointable
from tensorflow.python.training.tracking.util import CheckpointV1 as Checkpoint
from tensorflow.python.util.all_util import remove_undocumented
py_func = script_ops.eager_py_func
defun = _function_lib.defun
make_template = template.make_template_internal
implicit_gradients = backprop.implicit_grad
implicit_value_and_gradients = backprop.implicit_val_and_grad
gradients_function = backprop.gradients_function
value_and_gradients_function = backprop.val_and_grad_function
GradientTape = backprop.GradientTape # pylint: disable=invalid-name
in_eager_mode = executing_eagerly
remove_undocumented(__name__)
| apache-2.0 | 8,656,220,638,262,042,000 | 36.428571 | 124 | 0.810722 | false |
pmarques/ansible | lib/ansible/modules/iptables.py | 13 | 30939 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <[email protected]>
# Copyright: (c) 2017, Sébastien DA ROCHA <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <[email protected]>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- C(iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
notes:
- This module just deals with individual rules.If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command should operate on.
- If the kernel is configured with automatic module loading, an attempt will be made
to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
type: str
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain will not be modified.
type: str
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number.
- This works only with C(action=insert).
type: str
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
type: str
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- Specify the iptables chain to modify.
- This could be a user-defined chain or one of the standard iptables chains, like
C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
type: str
protocol:
description:
- The protocol of the rule or of the packet to check.
- The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(ipv6-icmp) or C(icmpv6),
C(esp), C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
representing one of these protocols or a different one.
- A protocol name from I(/etc/protocols) is also allowed.
- A C(!) argument before the protocol inverts the test.
- The number zero is equivalent to all.
- C(all) will match with all protocols and is taken as default when this option is omitted.
type: str
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
tcp_flags:
description:
- TCP flags specification.
- C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
type: dict
default: {}
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
type: list
elements: str
flags_set:
description:
- Flags to be set.
type: list
elements: str
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property.
- The set of matches make up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, i.e. if one extension yields false, evaluation will stop.
type: list
elements: str
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet matches it.
- The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below).
- If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
type: str
gateway:
description:
- This specifies the IP address of host to send the cloned packets.
- This option is only valid when C(jump) is set to C(TEE).
type: str
version_added: "2.8"
log_prefix:
description:
- Specifies a log text for the rule. Only make sense with a LOG jump.
type: str
version_added: "2.5"
log_level:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- This parameter is only applicable if C(jump) is set to C(LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
goto:
description:
- This specifies that the processing should continue in a user specified chain.
- Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
type: str
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins with
this name will match.
- If this option is omitted, any interface name will match.
type: str
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins
with this name will match.
- If this option is omitted, any interface name will match.
type: str
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets.
- Since there is no way to tell the source or destination ports of such
a packet (or ICMP type), such a packet will not match any rules which specify them.
- When the "!" argument precedes fragment argument, the rule will only match head fragments,
or unfragmented packets.
type: str
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
type: str
source_port:
description:
- Source port or port range specification.
- This can either be a service name or a port number.
- An inclusive range can also be specified, using the format C(first:last).
- If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
- If the first port is greater than the second one they will be swapped.
type: str
destination_port:
description:
- "Destination port or port range specification. This can either be
a service name or a port number. An inclusive range can also be
specified, using the format first:last. If the first port is omitted,
'0' is assumed; if the last is omitted, '65535' is assumed. If the
first port is greater than the second one they will be swapped.
This is only valid if the rule also specifies one of the following
protocols: tcp, udp, dccp or sctp."
type: str
destination_ports:
description:
- This specifies multiple destination port numbers or port ranges to match in the multiport module.
- It can only be used in conjunction with the protocols tcp, udp, udplite, dccp and sctp.
type: list
elements: str
version_added: "2.11"
to_ports:
description:
- This specifies a destination port or range of ports to use, without
this, the destination port is never altered.
- This is only valid if the rule also specifies one of the protocol
C(tcp), C(udp), C(dccp) or C(sctp).
type: str
to_destination:
description:
- This specifies a destination address to use with C(DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- This specifies a source address to use with C(SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
syn:
description:
- This allows matching packets that have the SYN bit set and the ACK
and RST bits unset.
- When negated, this matches all packets with the RST or the ACK bits set.
type: str
choices: [ ignore, match, negate ]
default: ignore
version_added: "2.5"
set_dscp_mark:
description:
- This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value.
- Mutually exclusive with C(set_dscp_mark_class).
type: str
version_added: "2.1"
set_dscp_mark_class:
description:
- This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark.
- Mutually exclusive with C(set_dscp_mark).
type: str
version_added: "2.1"
comment:
description:
- This specifies a comment that will be added to the rule.
type: str
ctstate:
description:
- A list of the connection states to match in the conntrack module.
- Possible values are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
type: list
elements: str
default: []
src_range:
description:
- Specifies the source IP range to match in the iprange module.
type: str
version_added: "2.8"
dst_range:
description:
- Specifies the destination IP range to match in the iprange module.
type: str
version_added: "2.8"
match_set:
description:
- Specifies a set name which can be defined by ipset.
- Must be used together with the match_set_flags parameter.
- When the C(!) argument is prepended then it inverts the rule.
- Uses the iptables set extension.
type: str
version_added: "2.11"
match_set_flags:
description:
- Specifies the necessary flags for the match_set parameter.
- Must be used together with the match_set parameter.
- Uses the iptables set extension.
type: str
choices: [ "src", "dst", "src,dst", "dst,src" ]
version_added: "2.11"
limit:
description:
      - Specifies the maximum average number of matches to allow per second.
      - The number can specify units explicitly, using C(/second), C(/minute),
        C(/hour) or C(/day), or parts of them (so C(5/second) is the same
        as C(5/s)).
type: str
limit_burst:
description:
- Specifies the maximum burst before the above limit kicks in.
type: str
version_added: "2.1"
uid_owner:
description:
- Specifies the UID or username to use in match by owner rule.
      - Since Ansible 2.6, when the C(!) argument is prepended it inverts
        the rule to apply instead to all users except the one specified.
type: str
version_added: "2.1"
gid_owner:
description:
- Specifies the GID or group to use in match by owner rule.
type: str
version_added: "2.9"
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
"jump: REJECT".'
type: str
version_added: "2.1"
icmp_type:
description:
      - This allows specification of the ICMP type, which can be a numeric
        ICMP type, a type/code pair, or one of the ICMP type names shown by
        the command C(iptables -p icmp -h).
type: str
version_added: "2.2"
flush:
description:
- Flushes the specified table and chain of all rules.
- If no chain is specified then the entire table is purged.
- Ignores all other parameters.
type: bool
default: false
version_added: "2.2"
policy:
description:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the C(chain) parameter.
- If you specify this parameter, all other parameters will be ignored.
- This parameter is used to set default policy for the given C(chain).
Do not confuse this with C(jump) parameter.
type: str
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
wait:
description:
- Wait N seconds for the xtables lock to prevent multiple instances of
the program from running concurrently.
type: str
version_added: "2.10"
'''
EXAMPLES = r'''
- name: Block specific IP
ansible.builtin.iptables:
chain: INPUT
source: 8.8.8.8
jump: DROP
become: yes
- name: Forward port 80 to 8600
ansible.builtin.iptables:
table: nat
chain: PREROUTING
in_interface: eth0
protocol: tcp
match: tcp
destination_port: 80
jump: REDIRECT
to_ports: 8600
comment: Redirect web traffic to port 8600
become: yes
- name: Allow related and established connections
ansible.builtin.iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
become: yes
- name: Allow new incoming SYN packets on TCP port 22 (SSH)
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections.
- name: Match on IP ranges
ansible.builtin.iptables:
chain: FORWARD
src_range: 192.168.1.100-192.168.1.199
dst_range: 10.0.0.1-10.0.0.50
jump: ACCEPT
- name: Allow source IPs defined in ipset "admin_hosts" on port 22
ansible.builtin.iptables:
chain: INPUT
match_set: admin_hosts
match_set_flags: src
destination_port: 22
    jump: ACCEPT
- name: Tag all outbound tcp packets with DSCP mark 8
ansible.builtin.iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark: 8
protocol: tcp
- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
ansible.builtin.iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark_class: CS1
protocol: tcp
- name: Insert a rule on line 5
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_port: 8080
jump: ACCEPT
action: insert
rule_num: 5
# Think twice before running following task as this may lock target system
- name: Set the policy for the INPUT chain to DROP
ansible.builtin.iptables:
chain: INPUT
policy: DROP
- name: Reject tcp with tcp-reset
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
reject_with: tcp-reset
ip_version: ipv4
- name: Set tcp flags
ansible.builtin.iptables:
chain: OUTPUT
jump: DROP
protocol: tcp
tcp_flags:
flags: ALL
flags_set:
- ACK
- RST
- SYN
- FIN
- name: Iptables flush filter
ansible.builtin.iptables:
chain: "{{ item }}"
flush: yes
with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
- name: Iptables flush nat
ansible.builtin.iptables:
table: nat
chain: '{{ item }}'
flush: yes
with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
- name: Log packets arriving into a user-defined chain
ansible.builtin.iptables:
chain: LOGGING
action: append
state: present
limit: 2/second
limit_burst: 20
log_prefix: "IPTABLES:INFO: "
log_level: info
- name: Allow connections on multiple ports
ansible.builtin.iptables:
chain: INPUT
protocol: tcp
destination_ports:
- "80"
- "443"
- "8081:8083"
jump: ACCEPT
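# Illustrative example, not part of the original module documentation: it
# assumes eth0 is the public-facing interface and 192.0.2.1 its address.
- name: Source NAT traffic leaving the private network
  ansible.builtin.iptables:
    table: nat
    chain: POSTROUTING
    out_interface: eth0
    jump: SNAT
    to_source: 192.0.2.1
  become: yes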
'''
import re
from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
BINS = dict(
ipv4='iptables',
ipv6='ip6tables',
)
ICMP_TYPE_OPTIONS = dict(
ipv4='--icmp-type',
ipv6='--icmpv6-type',
)
def append_param(rule, param, flag, is_list):
if is_list:
for item in param:
append_param(rule, item, flag, False)
else:
if param is not None:
if param[0] == '!':
rule.extend(['!', flag, param[1:]])
else:
rule.extend([flag, param])
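# Illustrative note (not in the original source): append_param(rule, '!192.0.2.1', '-s', False)
# extends the rule with ['!', '-s', '192.0.2.1'], while a call with is_list=True
# simply repeats the flag/value handling once per list element.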
def append_tcp_flags(rule, param, flag):
if param:
if 'flags' in param and 'flags_set' in param:
rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
def append_match_flag(rule, param, flag, negatable):
if param == 'match':
rule.extend([flag])
elif negatable and param == 'negate':
rule.extend(['!', flag])
def append_csv(rule, param, flag):
if param:
rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
if param:
rule.extend(['-m', match])
def append_jump(rule, param, jump):
if param:
rule.extend(['-j', jump])
def append_wait(rule, param, flag):
if param:
rule.extend([flag, param])
def construct_rule(params):
rule = []
append_wait(rule, params['wait'], '-w')
append_param(rule, params['protocol'], '-p', False)
append_param(rule, params['source'], '-s', False)
append_param(rule, params['destination'], '-d', False)
append_param(rule, params['match'], '-m', True)
append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
append_param(rule, params['jump'], '-j', False)
if params.get('jump') and params['jump'].lower() == 'tee':
append_param(rule, params['gateway'], '--gateway', False)
append_param(rule, params['log_prefix'], '--log-prefix', False)
append_param(rule, params['log_level'], '--log-level', False)
append_param(rule, params['to_destination'], '--to-destination', False)
append_match(rule, params['destination_ports'], 'multiport')
append_csv(rule, params['destination_ports'], '--dports')
append_param(rule, params['to_source'], '--to-source', False)
append_param(rule, params['goto'], '-g', False)
append_param(rule, params['in_interface'], '-i', False)
append_param(rule, params['out_interface'], '-o', False)
append_param(rule, params['fragment'], '-f', False)
append_param(rule, params['set_counters'], '-c', False)
append_param(rule, params['source_port'], '--source-port', False)
append_param(rule, params['destination_port'], '--destination-port', False)
append_param(rule, params['to_ports'], '--to-ports', False)
append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
append_param(
rule,
params['set_dscp_mark_class'],
'--set-dscp-class',
False)
append_match_flag(rule, params['syn'], '--syn', True)
if 'conntrack' in params['match']:
append_csv(rule, params['ctstate'], '--ctstate')
elif 'state' in params['match']:
append_csv(rule, params['ctstate'], '--state')
elif params['ctstate']:
append_match(rule, params['ctstate'], 'conntrack')
append_csv(rule, params['ctstate'], '--ctstate')
if 'iprange' in params['match']:
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
elif params['src_range'] or params['dst_range']:
append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
if 'set' in params['match']:
append_param(rule, params['match_set'], '--match-set', False)
append_match_flag(rule, 'match', params['match_set_flags'], False)
elif params['match_set']:
append_match(rule, params['match_set'], 'set')
append_param(rule, params['match_set'], '--match-set', False)
append_match_flag(rule, 'match', params['match_set_flags'], False)
append_match(rule, params['limit'] or params['limit_burst'], 'limit')
append_param(rule, params['limit'], '--limit', False)
append_param(rule, params['limit_burst'], '--limit-burst', False)
append_match(rule, params['uid_owner'], 'owner')
append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
append_param(rule, params['uid_owner'], '--uid-owner', False)
append_match(rule, params['gid_owner'], 'owner')
append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
append_param(rule, params['gid_owner'], '--gid-owner', False)
if params['jump'] is None:
append_jump(rule, params['reject_with'], 'REJECT')
append_param(rule, params['reject_with'], '--reject-with', False)
append_param(
rule,
params['icmp_type'],
ICMP_TYPE_OPTIONS[params['ip_version']],
False)
append_match(rule, params['comment'], 'comment')
append_param(rule, params['comment'], '--comment', False)
return rule
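# Illustrative sketch (not in the original source): for params such as
# protocol='tcp', jump='ACCEPT' and destination_port='22', construct_rule()
# returns roughly ['-p', 'tcp', '-j', 'ACCEPT', '--destination-port', '22'],
# which push_arguments() then appends to the iptables command line.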
def push_arguments(iptables_path, action, params, make_rule=True):
cmd = [iptables_path]
cmd.extend(['-t', params['table']])
cmd.extend([action, params['chain']])
if action == '-I' and params['rule_num']:
cmd.extend([params['rule_num']])
if make_rule:
cmd.extend(construct_rule(params))
return cmd
def check_present(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-C', params)
rc, _, __ = module.run_command(cmd, check_rc=False)
return (rc == 0)
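# check_present() relies on `iptables -C`, which exits with 0 when an identical
# rule already exists, so a non-zero exit is treated here as the rule being absent.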
def append_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-A', params)
module.run_command(cmd, check_rc=True)
def insert_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-I', params)
module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-D', params)
module.run_command(cmd, check_rc=True)
def flush_table(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
cmd.append(params['policy'])
module.run_command(cmd, check_rc=True)
def get_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-L', params, make_rule=False)
rc, out, _ = module.run_command(cmd, check_rc=True)
chain_header = out.split("\n")[0]
result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
if result:
return result.group(1)
return None
def get_iptables_version(iptables_path, module):
cmd = [iptables_path, '--version']
rc, out, _ = module.run_command(cmd, check_rc=True)
return out.split('v')[1].rstrip('\n')
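# Illustrative note: `iptables --version` prints something like "iptables v1.8.7",
# so the code above keeps whatever follows the first "v" for version comparison.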
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
state=dict(type='str', default='present', choices=['absent', 'present']),
action=dict(type='str', default='append', choices=['append', 'insert']),
ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
chain=dict(type='str'),
rule_num=dict(type='str'),
protocol=dict(type='str'),
wait=dict(type='str'),
source=dict(type='str'),
to_source=dict(type='str'),
destination=dict(type='str'),
to_destination=dict(type='str'),
match=dict(type='list', elements='str', default=[]),
tcp_flags=dict(type='dict',
options=dict(
flags=dict(type='list', elements='str'),
flags_set=dict(type='list', elements='str'))
),
jump=dict(type='str'),
gateway=dict(type='str'),
log_prefix=dict(type='str'),
log_level=dict(type='str',
choices=['0', '1', '2', '3', '4', '5', '6', '7',
'emerg', 'alert', 'crit', 'error',
'warning', 'notice', 'info', 'debug'],
default=None,
),
goto=dict(type='str'),
in_interface=dict(type='str'),
out_interface=dict(type='str'),
fragment=dict(type='str'),
set_counters=dict(type='str'),
source_port=dict(type='str'),
destination_port=dict(type='str'),
destination_ports=dict(type='list', elements='str', default=[]),
to_ports=dict(type='str'),
set_dscp_mark=dict(type='str'),
set_dscp_mark_class=dict(type='str'),
comment=dict(type='str'),
ctstate=dict(type='list', elements='str', default=[]),
src_range=dict(type='str'),
dst_range=dict(type='str'),
match_set=dict(type='str'),
match_set_flags=dict(type='str', choices=['src', 'dst', 'src,dst', 'dst,src']),
limit=dict(type='str'),
limit_burst=dict(type='str'),
uid_owner=dict(type='str'),
gid_owner=dict(type='str'),
reject_with=dict(type='str'),
icmp_type=dict(type='str'),
syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
flush=dict(type='bool', default=False),
policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
),
mutually_exclusive=(
['set_dscp_mark', 'set_dscp_mark_class'],
['flush', 'policy'],
),
required_if=[
['jump', 'TEE', ['gateway']],
['jump', 'tee', ['gateway']],
]
)
args = dict(
changed=False,
failed=False,
ip_version=module.params['ip_version'],
table=module.params['table'],
chain=module.params['chain'],
flush=module.params['flush'],
rule=' '.join(construct_rule(module.params)),
state=module.params['state'],
)
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
# Check if chain option is required
if args['flush'] is False and args['chain'] is None:
module.fail_json(msg="Either chain or flush parameter must be specified.")
if module.params.get('log_prefix', None) or module.params.get('log_level', None):
if module.params['jump'] is None:
module.params['jump'] = 'LOG'
elif module.params['jump'] != 'LOG':
module.fail_json(msg="Logging options can only be used with the LOG jump target.")
    # Normalise the wait option to what the installed iptables understands:
    # '-w <seconds>' needs 1.6.0+, a bare '-w' needs 1.4.20+ (see constants above)
iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
module.params['wait'] = ''
else:
module.params['wait'] = None
# Flush the table
if args['flush'] is True:
args['changed'] = True
if not module.check_mode:
flush_table(iptables_path, module, module.params)
# Set the policy
elif module.params['policy']:
current_policy = get_chain_policy(iptables_path, module, module.params)
if not current_policy:
module.fail_json(msg='Can\'t detect current policy')
changed = current_policy != module.params['policy']
args['changed'] = changed
if changed and not module.check_mode:
set_chain_policy(iptables_path, module, module.params)
else:
insert = (module.params['action'] == 'insert')
rule_is_present = check_present(iptables_path, module, module.params)
should_be_present = (args['state'] == 'present')
# Check if target is up to date
args['changed'] = (rule_is_present != should_be_present)
if args['changed'] is False:
# Target is already up to date
module.exit_json(**args)
        # Apply the change unless we are only checking (check mode)
if not module.check_mode:
if should_be_present:
if insert:
insert_rule(iptables_path, module, module.params)
else:
append_rule(iptables_path, module, module.params)
else:
remove_rule(iptables_path, module, module.params)
module.exit_json(**args)
if __name__ == '__main__':
main()
| gpl-3.0 | -194,049,165,952,212,800 | 35.223653 | 128 | 0.627736 | false |
rbovard/las_extractor | setup.py | 2 | 1057 | # -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='las_extractor',
version='0.1',
description='SITN, a sitn project',
author='sitn',
author_email='[email protected]',
url='http://www.ne.ch/sitn',
install_requires=[
'pyramid',
'SQLAlchemy',
'waitress',
'sqlahelper',
'pyramid_debugtoolbar',
'pyramid_tm',
'papyrus',
'pyshp',
'simplekml',
'numpy',
'pyyaml',
'pip',
],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
message_extractors={'las_extractor': [
('static/**', 'ignore', None),
('**.py', 'python', None),
('templates/**', 'mako', {'input_encoding': 'utf-8'})]},
zip_safe=False,
entry_points={
'paste.app_factory': [
'main = las_extractor:main',
],
},
)
| gpl-2.0 | 4,500,942,745,142,716,000 | 23.581395 | 66 | 0.534532 | false |
qnu/gxp | vgxp/pinfo.py | 3 | 16415 | #!/usr/bin/env python
import sys, os, os.path, socket, time, random, glob, re, pwd, errno, select, urllib
"""
/proc/stat
cpu <user> <nice> <system> <idle> <iowait> <irq> <softirq> ...
- user: normal processes executing in user mode
- nice: niced processes executing in user mode
- system: processes executing in kernel mode
- idle: twiddling thumbs
- iowait: waiting for I/O to complete
"""
DELETE_TIMEOUT = 20
flg_print_meminfo = True
flg_print_stateinfo = True
flg_print_netinfo = True
flg_print_diskinfo = True
flg_print_pinfo = True
poll_interval = 2.0
class procinfo:
def __init__ ( self, _u, _p, _g, _c ):
self.uid = _u
self.ppid = _p
self.pgid = _g
self.cmd = _c
self.sent = None
class pinfo_default_log_handler:
def handle_log ( self, line ):
print line
sys.stdout.flush ()
class pinfo_common:
def __init__ ( self, handler = pinfo_default_log_handler () ):
self.hn = socket.gethostname ().split ( '.', 1 ) [ 0 ]
self.process_table = {}
self.cputimes = [ {}, {} ]
self.btime = self.get_btime ()
self.log_handler = handler
self.netbytes = [(0,0),(0,0)]
self.diskpages = [None, None]
self.nfork = [None, None]
self.nfork_diff = 0
self.cpustat = [None, None]
self.cpustat_diff = [0,0,0,0,0]
self.uid_table = {}
self.state_dir = '/data/local/yuuki-s/tmp/'
self.stateconfig_mtime = 0
self.stateconfig_content = ''
self.state_mtime = 0
self.state_content = ''
self.pgid_mtime = 0
self.pgid_content = ''
try:
os.stat(self.state_dir + 'state.log')
except:
self.state_dir = '/home/yuuki-s/charlie-log/tmp/'
try:
os.stat(self.state_dir + 'state.log')
except:
# sys.stderr.write("state_dir not found!\n");
self.state_dir = None
#sys.stderr.write("state_dir is %s\n" % (self.state_dir))
def get_btime ( self ):
return time.time ()
#fp = open ( '/proc/stat', 'r' );
#btime = None
#while True:
# line = fp.readline ()
# if line == '':
# break
# v = line.split ()
# if v [ 0 ] == 'btime':
# btime = long ( v [ 1 ] )
# break
#fp.close ()
#assert btime != None
#return btime
def prg_cache_load ( self, idx, ts ):
self.cputimes [ idx ] = {}
procs = glob.glob ( '/proc/*' )
for dname in procs:
try:
if not os.path.isdir ( dname ):
continue
if re.compile ( '\d+' ).match ( dname, 6 ) == None:
continue
pid = int ( dname [ 6: ] )
fd = open ( dname + '/stat', 'r' )
if fd == None:
continue
statarray = fd.read ().split ()
fd.close ()
if not self.process_table.has_key ( pid ):
uid = os.stat ( dname ).st_uid
ppid = int ( statarray [ 3 ] )
pgid = int ( statarray [ 4 ] )
if ( ppid < 0 or pgid < 0 ):
continue
fd = open ( dname + '/cmdline', 'r' )
if fd == None:
continue
cmdline = fd.read ().split ( '\0' )
if len ( cmdline [ 0 ] ) > 0:
firstchar = cmdline [ 0 ] [ 0 ]
if firstchar == '.' or firstchar == '/':
cmdline [ 0 ] = os.path.split ( cmdline [ 0 ] ) [ 1 ]
cmdline [ 0 ] = re.compile ( '\s' ).sub ( '_', cmdline [ 0 ] )
else:
fd = open ( dname + '/status', 'r' )
cmdline [ 0 ] = '[' + fd.readline().split()[1] + ']'
pi = procinfo ( uid, ppid, pgid, cmdline [ 0 ] )
self.process_table [ pid ] = pi
putime = long ( statarray [ 13 ] )
pstime = long ( statarray [ 14 ] )
self.cputimes [ idx ] [ pid ] = putime + pstime
except OSError:
continue
except IOError:
continue
def calc_netdifference ( self, idx, ts ):
cur = self.netbytes [ idx ]
prev = self.netbytes [ idx ^ 1 ]
if prev[0] == 0:
# we send nothing for the first time
return "N 0 0"
else:
return "N %d %d" % (cur[0]-prev[0],cur[1]-prev[1])
def calc_difference ( self, idx, ts ):
buf = ''
cur = self.cputimes [ idx ]
prev = self.cputimes [ idx ^ 1 ]
delete_list = []
loadavg = 0
for pid, pi in self.process_table.iteritems ():
if cur.has_key ( pid ):
t = cur [ pid ]
if prev.has_key ( pid ):
t -= prev [ pid ]
if t > 0:
if pi.sent == None:
user = pi.uid
try:
if not self.uid_table.has_key ( pi.uid ):
user = pwd.getpwuid ( pi.uid ) [ 0 ]
self.uid_table [ pi.uid ] = user
user = self.uid_table [ pi.uid ]
except:
pass
buf += "P %d %s %s %d " % ( pid, pi.cmd, user, t )
else:
buf += "C %d %d " % ( pid, t )
pi.sent = ts
loadavg += t
else:
if pi.sent != None and ( ts - pi.sent > DELETE_TIMEOUT ):
buf += "T %d " % pid
delete_list.append ( pid )
elif prev.has_key ( pid ):
# old process
delete_list.append ( pid )
if pi.sent != None:
buf += "T %d " % pid
for pid in delete_list:
del self.process_table [ pid ]
return ( buf, loadavg / (poll_interval * 100.0) )
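    # Records emitted per process by calc_difference (see the code above):
    #   "P <pid> <cmd> <user> <ticks>"  first report of an active process
    #   "C <pid> <ticks>"               subsequent CPU-usage updates
    #   "T <pid>"                       process has terminated or timed out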
def get_netinfo ( self, idx ):
retry_flag = True
while retry_flag:
retry_flag = None
fp = open('/proc/net/dev', 'r')
rbytes = 0
wbytes = 0
if fp:
line = fp.readline()
line = fp.readline()
line_number = 2
while True:
line = fp.readline()
line_number += 1
if line == '':
break
values = []
try:
map(lambda x:values.extend(x.split()), line.split(":"))
if values[0].find("eth") == 0:
rbytes += long(values[1])
wbytes += long(values[9])
except IndexError:
sys.stderr.write("line %d: [[%s]]\n" % (line_number, line))
retry_flag = True
break
self.netbytes[idx] = (rbytes,wbytes)
def get_meminfo ( self ):
while True:
try:
m0 = m1 = m2 = 0
fp = open ( '/proc/meminfo', 'r' )
if fp == None:
return None
m_total = -1
m_free = -1
m_buffers = -1
m_cached = -1
s_total = -1
s_free = -1
while True:
line = fp.readline ()
if line == '':
break
( k, v ) = line.split ( None, 2 ) [ :2 ]
if k == "SwapTotal:":
s_total = long ( v )
elif k == "SwapFree:":
s_free = long ( v )
elif k == "MemTotal:":
m_total = long ( v )
elif k == "MemFree:":
m_free = long ( v )
elif k == "Buffers:":
m_buffers = long ( v )
elif k == "Cached:":
m_cached = long ( v )
fp.close ()
m0 = float ( m_total - m_free - m_buffers - m_cached ) / m_total;
m1 = float ( m_buffers + m_cached ) / m_total;
                if ( s_total < 0 ) or ( s_free < 0 ):
                    m2 = -1
elif s_total == 0:
m2 = 0
else:
m2 = float ( s_total - s_free ) / s_total
return "M %.2f %.2f %.2f" % ( m0, m1, m2 )
except ValueError:
pass
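    # The "M a b c" record above reports fractions of application-used memory,
    # buffer/cache memory and swap in use (-1 when swap totals are unreadable).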
def get_diskinfo ( self, idx ):
pgin = -1
pgout = -1
swin = -1
swout = -1
fp = None
try:
fp = open ( '/proc/vmstat', 'r' )
except IOError,(err,desc):
if err == errno.ENOENT:
pass
else:
raise
if fp:
# kernel 2.6
while True:
line = fp.readline ()
if line == '':
break
( k, v ) = line.split ( None, 2 ) [ :2 ]
if k == "pgpgin":
pgin = long ( v )
elif k == "pgpgout":
pgout = long ( v )
elif k == "pswpin":
swin = long ( v )
elif k == "pswpout":
swout = long ( v )
fp.close ()
if ( pgin == -1 ) or ( pgout == -1 ) or ( swin == -1 ) or ( swout == -1 ):
# kernel 2.4
fp = open ( '/proc/stat', 'r' );
if fp:
while True:
line = fp.readline ()
if line == '':
break
a = line.split ()
if len(a) != 3:
continue
( k, v1, v2 ) = a
if k == 'page':
pgin = long(v1)
pgout = long(v2)
elif k == 'swap':
swin = long(v1)
swout = long(v2)
self.diskpages[idx] = ( pgin, pgout, swin, swout )
def calc_diskdifference ( self, idx, ts ):
cur = self.diskpages [ idx ]
prev = self.diskpages [ idx ^ 1 ]
if prev == None:
# we send nothing for the first time
return "D 0 0 0 0"
else:
return "D %d %d %d %d" % (cur[0]-prev[0],cur[1]-prev[1],cur[2]-prev[2],cur[3]-prev[3])
def get_stateinfo ( self ):
if self.state_dir == None:
return ''
flag = False
fname = self.state_dir + 'state.log'
try:
mtime = os.stat ( fname ).st_mtime
if mtime != self.state_mtime:
content = ''
fp = open ( fname, 'r' )
content = fp.read()
fp.close()
if content != '' and content != self.state_content:
flag = True
self.state_mtime = mtime
self.state_content = content
fname = self.state_dir + 'pgidgroup.log'
mtime = os.stat ( fname ).st_mtime
if mtime != self.pgid_mtime:
content = ''
fp = open ( fname, 'r' )
content = fp.read()
fp.close()
content = urllib.urlencode({'a': content})[2:]
if content != '' and content != self.pgid_content:
flag = True
self.pgid_mtime = mtime
self.pgid_content = content
if flag:
return 'S %s %s' % (self.state_content, self.pgid_content)
except:
pass
return ''
def get_stateconfig ( self ):
if self.state_dir == None:
return ''
flag = False
fname = self.state_dir + 'state_config.dat'
mtime = os.stat ( fname ).st_mtime
if mtime != self.stateconfig_mtime:
content = ''
fp = open ( fname, 'r' )
content = fp.read()
fp.close()
content = urllib.urlencode({'a': content})[2:]
if content != '' and content != self.stateconfig_content:
flag = True
self.stateconfig_mtime = mtime
self.stateconfig_content = content
if flag:
return 'SS %s' % (self.stateconfig_content)
return ''
def get_procinfo ( self, idx ):
p = 0
fp = open ( '/proc/stat', 'r' );
if fp:
while True:
line = fp.readline ()
if line == '':
break
( k, v ) = line.split ( None, 1 ) [ :2 ]
# we assume "cpu" always appears earlier than "processes"
if k == 'processes':
p = long(v)
break
elif k == 'cpu':
self.cpustat [ idx ] = map(lambda x:long(x), v.split ( None, 5 ) [ :5 ])
if len(self.cpustat [ idx ]) == 4:
self.cpustat [ idx ].append(0L);
self.nfork [ idx ] = p
def calc_procdifference ( self, idx, ts ):
cur = self.nfork [ idx ]
prev = self.nfork [ idx ^ 1 ]
if prev == None:
# we send nothing for the first time
self.nfork_diff = 0
self.cpustat_diff = [ 0, 0, 0, 0, 0 ]
else:
self.nfork_diff = cur - prev
for i in range(5):
self.cpustat_diff [ i ] = self.cpustat [ idx ] [ i ] - self.cpustat [ idx ^ 1 ] [ i ]
def print_line ( self, l, ts, m, s, buf, netdiff, diskdiff ):
a = os.getloadavg()
curtime = long ( ts * 100 )
l = (self.cpustat_diff[0] + self.cpustat_diff[1]) / (poll_interval * 100.0)
line = "%s %d C %.2f %.2f %.2f %.2f %d " % ( self.hn, curtime, l, a[0], a[1], a[2], self.nfork_diff )
for v in self.cpustat_diff:
line += "%d " % (v)
if flg_print_meminfo and m != '':
line += "%s " % (m)
if flg_print_stateinfo and s != '':
line += "%s " % (s)
if flg_print_netinfo and netdiff != '':
line += "%s " % (netdiff)
if flg_print_diskinfo and diskdiff != '':
line += "%s " % (diskdiff)
if flg_print_pinfo:
line += "P %s" % (buf)
self.log_handler.handle_log ( line )
def main_loop ( self ):
i = 0
ts = time.time () - self.btime
self.prg_cache_load ( i, ts )
while True:
            # consume stdin
rfds,_,_ = select.select([sys.stdin],[],[],0)
if len(rfds) > 0:
for rfd in rfds:
_ = rfd.read(8192)
try:
time.sleep ( poll_interval )
except:
return
i ^= 1
ts = time.time () - self.btime
self.prg_cache_load ( i, ts );
self.get_procinfo(i);
self.get_netinfo(i);
self.get_diskinfo(i);
( buf, l ) = self.calc_difference ( i, ts )
netdiff = self.calc_netdifference ( i, ts )
diskdiff = self.calc_diskdifference ( i, ts )
self.calc_procdifference ( i, ts )
m = self.get_meminfo ()
s = self.get_stateinfo ()
ss = self.get_stateconfig ()
if ss != '':
s += ' ' + ss
self.print_line ( l, ts, m, s, buf, netdiff, diskdiff )
class pinfo_session:
def __init__ ( self ):
start_time = time.time ()
def main ():
try:
random.seed ()
time.sleep ( random.random () * poll_interval )
pinfo_common ().main_loop ()
except IOError,(err,desc):
if err == errno.EPIPE:
pass
else:
raise
if __name__ == "__main__":
main()
| bsd-3-clause | 8,217,518,457,316,253,000 | 34.684783 | 109 | 0.400792 | false |
Dino0631/RedRain-Bot | cogs/lib/nacl/__init__.py | 16 | 1170 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "PyNaCl"
__summary__ = ("Python binding to the Networking and Cryptography (NaCl) "
"library")
__uri__ = "https://github.com/pyca/pynacl/"
__version__ = "1.0.1"
__author__ = "The PyNaCl developers"
__email__ = "[email protected]"
__license__ = "Apache License 2.0"
__copyright__ = "Copyright 2013-2016 {0}".format(__author__)
| gpl-3.0 | 4,528,949,898,195,974,000 | 34.454545 | 74 | 0.677778 | false |
feend78/evennia | evennia/contrib/email_login.py | 1 | 12460 | """
Email-based login system
Evennia contrib - Griatch 2012
This is a variant of the login system that requires an email-address
instead of a username to login.
This used to be the default Evennia login before replacing it with a
more standard username + password system (having to supply an email
for some reason caused a lot of confusion when people wanted to expand
on it. The email is not strictly needed internally, nor is any
confirmation email sent out anyway).
Installation is simple:
To your settings file, add/edit the line:
```python
CMDSET_UNLOGGEDIN = "contrib.email_login.UnloggedinCmdSet"
```
That's it. Reload the server and try to log in to see it.
The initial login "graphic" will still not mention email addresses
after this change. The login splashscreen is taken from strings in
the module given by settings.CONNECTION_SCREEN_MODULE.
"""
import re
from django.conf import settings
from evennia.accounts.models import AccountDB
from evennia.objects.models import ObjectDB
from evennia.server.models import ServerConfig
from evennia.commands.cmdset import CmdSet
from evennia.utils import logger, utils, ansi
from evennia.commands.default.muxcommand import MuxCommand
from evennia.commands.cmdhandler import CMD_LOGINSTART
from evennia.commands.default import unloggedin as default_unloggedin # Used in CmdUnconnectedCreate
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate",
"CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp")
MULTISESSION_MODE = settings.MULTISESSION_MODE
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
CONNECTION_SCREEN = ""
try:
CONNECTION_SCREEN = ansi.parse_ansi(utils.random_string_from_module(CONNECTION_SCREEN_MODULE))
except Exception:
# malformed connection screen or no screen given
pass
if not CONNECTION_SCREEN:
CONNECTION_SCREEN = "\nEvennia: Error in CONNECTION_SCREEN MODULE" \
" (randomly picked connection screen variable is not a string). \nEnter 'help' for aid."
class CmdUnconnectedConnect(MuxCommand):
"""
Connect to the game.
Usage (at login screen):
connect <email> <password>
Use the create command to first create an account before logging in.
"""
key = "connect"
aliases = ["conn", "con", "co"]
locks = "cmd:all()" # not really needed
def func(self):
"""
Uses the Django admin api. Note that unlogged-in commands
have a unique position in that their `func()` receives
a session object instead of a `source_object` like all
other types of logged-in commands (this is because
there is no object yet before the account has logged in)
"""
session = self.caller
arglist = self.arglist
if not arglist or len(arglist) < 2:
session.msg("\n\r Usage (without <>): connect <email> <password>")
return
email = arglist[0]
password = arglist[1]
# Match an email address to an account.
account = AccountDB.objects.get_account_from_email(email)
# No accountname match
if not account:
string = "The email '%s' does not match any accounts." % email
string += "\n\r\n\rIf you are new you should first create a new account "
string += "using the 'create' command."
session.msg(string)
return
        # We have at least one result, so we can check the password.
        account = account[0]
        if not account.check_password(password):
            session.msg("Incorrect password.")
            return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0] == account.name for tup in bans) or
any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "|rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.|x"
session.msg(string)
session.execute_cmd("quit")
return
# actually do the login. This will call all hooks.
session.sessionhandler.login(session, account)
class CmdUnconnectedCreate(MuxCommand):
"""
Create a new account.
Usage (at login screen):
create \"accountname\" <email> <password>
This creates a new account account.
"""
key = "create"
aliases = ["cre", "cr"]
locks = "cmd:all()"
def parse(self):
"""
The parser must handle the multiple-word account
name enclosed in quotes:
connect "Long name with many words" [email protected] mypassw
"""
super(CmdUnconnectedCreate, self).parse()
self.accountinfo = []
if len(self.arglist) < 3:
return
if len(self.arglist) > 3:
# this means we have a multi_word accountname. pop from the back.
password = self.arglist.pop()
email = self.arglist.pop()
# what remains is the accountname.
accountname = " ".join(self.arglist)
else:
accountname, email, password = self.arglist
accountname = accountname.replace('"', '') # remove "
accountname = accountname.replace("'", "")
self.accountinfo = (accountname, email, password)
def func(self):
"""Do checks and create account"""
session = self.caller
try:
accountname, email, password = self.accountinfo
except ValueError:
string = "\n\r Usage (without <>): create \"<accountname>\" <email> <password>"
session.msg(string)
return
if not email or not password:
session.msg("\n\r You have to supply an e-mail address followed by a password.")
return
if not utils.validate_email_address(email):
# check so the email at least looks ok.
session.msg("'%s' is not a valid e-mail address." % email)
return
# sanity checks
if not re.findall(r"^[\w. @+\-']+$", accountname) or not (0 < len(accountname) <= 30):
# this echoes the restrictions made by django's auth
# module (except not allowing spaces, for convenience of
# logging in).
string = "\n\r Accountname can max be 30 characters or fewer. Letters, spaces, digits and @/./+/-/_/' only."
session.msg(string)
return
# strip excessive spaces in accountname
accountname = re.sub(r"\s+", " ", accountname).strip()
if AccountDB.objects.filter(username__iexact=accountname):
# account already exists (we also ignore capitalization here)
session.msg("Sorry, there is already an account with the name '%s'." % accountname)
return
if AccountDB.objects.get_account_from_email(email):
# email already set on an account
session.msg("Sorry, there is already an account with that email address.")
return
# Reserve accountnames found in GUEST_LIST
if settings.GUEST_LIST and accountname.lower() in (guest.lower() for guest in settings.GUEST_LIST):
string = "\n\r That name is reserved. Please choose another Accountname."
session.msg(string)
return
if not re.findall(r"^[\w. @+\-']+$", password) or not (3 < len(password)):
string = "\n\r Password should be longer than 3 characers. Letters, spaces, digits and @/./+/-/_/' only." \
"\nFor best security, make it longer than 8 characters. You can also use a phrase of" \
"\nmany words if you enclose the password in double quotes."
session.msg(string)
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0] == accountname.lower() for tup in bans) or
any(tup[2].match(session.address) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "|rYou have been banned and cannot continue from here." \
"\nIf you feel this ban is in error, please email an admin.|x"
session.msg(string)
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
return
# everything's ok. Create the new player account.
try:
permissions = settings.PERMISSION_ACCOUNT_DEFAULT
typeclass = settings.BASE_CHARACTER_TYPECLASS
new_account = default_unloggedin._create_account(session, accountname, password, permissions, email=email)
if new_account:
if MULTISESSION_MODE < 2:
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
default_unloggedin._create_character(session, new_account, typeclass, default_home, permissions)
# tell the caller everything went well.
string = "A new account '%s' was created. Welcome!"
if " " in accountname:
string += "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
else:
string += "\n\nYou can now log with the command 'connect %s <your password>'."
session.msg(string % (accountname, email))
except Exception:
# We are in the middle between logged in and -not, so we have
# to handle tracebacks ourselves at this point. If we don't,
# we won't see any errors at all.
session.msg("An error occurred. Please e-mail an admin if the problem persists.")
logger.log_trace()
raise
class CmdUnconnectedQuit(MuxCommand):
"""
We maintain a different version of the `quit` command
here for unconnected accounts for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "quit"
aliases = ["q", "qu"]
locks = "cmd:all()"
def func(self):
"""Simply close the connection."""
session = self.caller
session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
class CmdUnconnectedLook(MuxCommand):
"""
This is an unconnected version of the `look` command for simplicity.
This is called by the server and kicks everything in gear.
All it does is display the connect screen.
"""
key = CMD_LOGINSTART
aliases = ["look", "l"]
locks = "cmd:all()"
def func(self):
"""Show the connect screen."""
self.caller.msg(CONNECTION_SCREEN)
class CmdUnconnectedHelp(MuxCommand):
"""
This is an unconnected version of the help command,
for simplicity. It shows a pane of info.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
def func(self):
"""Shows help"""
string = \
"""
You are not yet logged into the game. Commands available at this point:
|wcreate, connect, look, help, quit|n
To login to the system, you need to do one of the following:
|w1)|n If you have no previous account, you need to use the 'create'
command like this:
|wcreate "Anna the Barbarian" [email protected] c67jHL8p|n
It's always a good idea (not only here, but everywhere on the net)
to not use a regular word for your password. Make it longer than
3 characters (ideally 6 or more) and mix numbers and capitalization
into it.
|w2)|n If you have an account already, either because you just created
one in |w1)|n above or you are returning, use the 'connect' command:
|wconnect [email protected] c67jHL8p|n
This should log you in. Run |whelp|n again once you're logged in
to get more aid. Hope you enjoy your stay!
You can use the |wlook|n command if you want to see the connect screen again.
"""
self.caller.msg(string)
# command set for the mux-like login
class UnloggedinCmdSet(CmdSet):
"""
Sets up the unlogged cmdset.
"""
key = "Unloggedin"
priority = 0
def at_cmdset_creation(self):
"""Populate the cmdset"""
self.add(CmdUnconnectedConnect())
self.add(CmdUnconnectedCreate())
self.add(CmdUnconnectedQuit())
self.add(CmdUnconnectedLook())
self.add(CmdUnconnectedHelp())
| bsd-3-clause | -879,928,916,317,831,700 | 36.53012 | 120 | 0.630177 | false |
c86j224s/snippet | Python_Pygments/lib/python3.6/site-packages/pygments/lexers/pascal.py | 6 | 32651 | # -*- coding: utf-8 -*-
"""
pygments.lexers.pascal
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Pascal family languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \
using, this, default
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
from pygments.scanner import Scanner
# compatibility import
from pygments.lexers.modula2 import Modula2Lexer
__all__ = ['DelphiLexer', 'AdaLexer']
class DelphiLexer(Lexer):
"""
For `Delphi <http://www.borland.com/delphi/>`_ (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas', '*.dpr']
mimetypes = ['text/x-pascal']
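    # Illustrative usage (assumes pygments.highlight and a formatter are imported):
    #   highlight(code, DelphiLexer(units=['System', 'SysUtils']), TerminalFormatter())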
TURBO_PASCAL_KEYWORDS = (
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
)
DELPHI_KEYWORDS = (
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
)
FREE_PASCAL_KEYWORDS = (
'dispose', 'exit', 'false', 'new', 'true'
)
BLOCK_KEYWORDS = set((
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
))
FUNCTION_MODIFIERS = set((
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
))
# XXX: those aren't global. but currently we know no way for defining
# them just for the type context.
DIRECTIVES = set((
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
))
BUILTIN_TYPES = set((
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
))
BUILTIN_UNITS = {
'System': (
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
),
'SysUtils': (
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
),
'Classes': (
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
),
'Math': (
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
)
}
ASM_REGISTERS = set((
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
))
ASM_INSTRUCTIONS = set((
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
))
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
self.builtins = set()
for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
def get_tokens_unprocessed(self, text):
scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
stack = ['initial']
in_function_block = False
in_property_block = False
was_dot = False
next_token_is_function = False
next_token_is_property = False
collect_labels = False
block_labels = set()
brace_balance = [0, 0]
while not scanner.eos:
token = Error
if stack[-1] == 'initial':
if scanner.scan(r'\s+'):
token = Text
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    if scanner.match.startswith('{$') or scanner.match.startswith('(*$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
token = Operator
# stop label highlighting on next ";"
if collect_labels and scanner.match == ';':
collect_labels = False
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
# abort function naming ``foo = Function(...)``
next_token_is_function = False
# if we are in a function block we count the open
                    # braces because otherwise it's impossible to
# determine the end of the modifier context
if in_function_block or in_property_block:
if scanner.match == '(':
brace_balance[0] += 1
elif scanner.match == ')':
brace_balance[0] -= 1
elif scanner.match == '[':
brace_balance[1] += 1
elif scanner.match == ']':
brace_balance[1] -= 1
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name == 'result':
token = Name.Builtin.Pseudo
elif lowercase_name in self.keywords:
token = Keyword
# if we are in a special block and a
                        # block ending keyword occurs (and the parenthesis
# is balanced) we end the current block context
if (in_function_block or in_property_block) and \
lowercase_name in self.BLOCK_KEYWORDS and \
brace_balance[0] <= 0 and \
brace_balance[1] <= 0:
in_function_block = False
in_property_block = False
brace_balance = [0, 0]
block_labels = set()
if lowercase_name in ('label', 'goto'):
collect_labels = True
elif lowercase_name == 'asm':
stack.append('asm')
elif lowercase_name == 'property':
in_property_block = True
next_token_is_property = True
elif lowercase_name in ('procedure', 'operator',
'function', 'constructor',
'destructor'):
in_function_block = True
next_token_is_function = True
# we are in a function block and the current name
# is in the set of registered modifiers. highlight
# it as pseudo keyword
elif in_function_block and \
lowercase_name in self.FUNCTION_MODIFIERS:
token = Keyword.Pseudo
# if we are in a property highlight some more
# modifiers
elif in_property_block and \
lowercase_name in ('read', 'write'):
token = Keyword.Pseudo
next_token_is_function = True
# if the last iteration set next_token_is_function
# to true we now want this name highlighted as
# function. so do that and reset the state
elif next_token_is_function:
                        # Check whether the next token is a dot. If so, this
                        # is not a function but a class name, and the part
                        # after the dot is the function name
if scanner.test(r'\s*\.\s*'):
token = Name.Class
# it's not a dot, our job is done
else:
token = Name.Function
next_token_is_function = False
# same for properties
elif next_token_is_property:
token = Name.Property
next_token_is_property = False
# Highlight this token as label and add it
# to the list of known labels
elif collect_labels:
token = Name.Label
block_labels.add(scanner.match.lower())
# name is in list of known labels
elif lowercase_name in block_labels:
token = Name.Label
elif lowercase_name in self.BUILTIN_TYPES:
token = Keyword.Type
elif lowercase_name in self.DIRECTIVES:
token = Keyword.Pseudo
# builtins are just builtins if the token
# before isn't a dot
elif not was_dot and lowercase_name in self.builtins:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
token = String.Char
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
                    # if the stack depth is deeper than one, pop
if len(stack) > 1:
stack.pop()
scanner.get_char()
elif stack[-1] == 'string':
if scanner.scan(r"''"):
token = String.Escape
elif scanner.scan(r"'"):
token = String
stack.pop()
elif scanner.scan(r"[^']*"):
token = String
else:
scanner.get_char()
stack.pop()
elif stack[-1] == 'asm':
if scanner.scan(r'\s+'):
token = Text
elif scanner.scan(r'end'):
token = Keyword
stack.pop()
elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
if scanner.match.startswith('$'):
token = Comment.Preproc
else:
token = Comment.Multiline
elif scanner.scan(r'//.*?$'):
token = Comment.Single
elif scanner.scan(r"'"):
token = String
stack.append('string')
elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
token = Name.Label
elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
lowercase_name = scanner.match.lower()
if lowercase_name in self.ASM_INSTRUCTIONS:
token = Keyword
elif lowercase_name in self.ASM_REGISTERS:
token = Name.Builtin
else:
token = Name
elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
token = Operator
elif scanner.scan(r'[\(\)\[\]]+'):
token = Punctuation
elif scanner.scan(r'\$[0-9A-Fa-f]+'):
token = Number.Hex
elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
token = Number.Integer
elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
token = Number.Float
else:
scanner.get_char()
stack.pop()
            # remember whether the last significant token was a dot
if scanner.match.strip():
was_dot = scanner.match == '.'
yield scanner.start_pos, token, scanner.match or ''
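# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes the class defined above is Pygments' DelphiLexer (the class header is
# outside this excerpt); substitute the real name if it differs. NullFormatter
# discards styling, so the call below only exercises the tokenizer.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import NullFormatter
    _demo_source = "program Demo;\nbegin\n  WriteLn('hello');\nend.\n"
    print(highlight(_demo_source, DelphiLexer(), NullFormatter()))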
class AdaLexer(RegexLexer):
"""
For Ada source code.
.. versionadded:: 1.3
"""
name = 'Ada'
aliases = ['ada', 'ada95', 'ada2005']
filenames = ['*.adb', '*.ads', '*.ada']
mimetypes = ['text/x-ada']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'--.*?\n', Comment.Single),
(r'[^\S\n]+', Text),
(r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
(r'(subtype|type)(\s+)(\w+)',
bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
(r'task|protected', Keyword.Declaration),
(r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
(r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
(r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text,
Comment.Preproc)),
(r'(true|false|null)\b', Keyword.Constant),
(words((
'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count',
'Cursor', 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator',
'Integer', 'Long_Float', 'Long_Integer', 'Long_Long_Float',
'Long_Long_Integer', 'Natural', 'Positive', 'Reference_Type',
'Short_Float', 'Short_Integer', 'Short_Short_Float',
'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String'),
suffix=r'\b'),
Keyword.Type),
(r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word),
(r'generic|private', Keyword.Declaration),
(r'package', Keyword.Declaration, 'package'),
(r'array\b', Keyword.Reserved, 'array_def'),
(r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(\w+)(\s*)(:)(\s*)(constant)',
bygroups(Name.Constant, Text, Punctuation, Text,
Keyword.Reserved)),
(r'<<\w+>>', Name.Label),
(r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
(words((
'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all',
'array', 'at', 'begin', 'body', 'case', 'constant', 'declare',
'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry',
'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is', 'limited',
'loop', 'new', 'null', 'of', 'or', 'others', 'out', 'overriding',
'pragma', 'protected', 'raise', 'range', 'record', 'renames', 'requeue',
'return', 'reverse', 'select', 'separate', 'subtype', 'synchronized',
'task', 'tagged', 'terminate', 'then', 'type', 'until', 'when',
'while', 'xor'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(r'"[^"]*"', String),
include('attribute'),
include('numbers'),
(r"'[^']'", String.Character),
(r'(\w+)(\s*|[(,])', bygroups(Name, using(this))),
(r"(<>|=>|:=|[()|:;,.'])", Punctuation),
(r'[*<>+=/&-]', Operator),
(r'\n+', Text),
],
'numbers': [
(r'[0-9_]+#[0-9a-f]+#', Number.Hex),
(r'[0-9_]+\.[0-9_]*', Number.Float),
(r'[0-9_]+', Number.Integer),
],
'attribute': [
(r"(')(\w+)", bygroups(Punctuation, Name.Attribute)),
],
'subprogram': [
(r'\(', Punctuation, ('#pop', 'formal_part')),
(r';', Punctuation, '#pop'),
(r'is\b', Keyword.Reserved, '#pop'),
(r'"[^"]+"|\w+', Name.Function),
include('root'),
],
'end': [
('(if|case|record|loop|select)', Keyword.Reserved),
(r'"[^"]+"|[\w.]+', Name.Function),
(r'\s+', Text),
(';', Punctuation, '#pop'),
],
'type_def': [
(r';', Punctuation, '#pop'),
(r'\(', Punctuation, 'formal_part'),
(r'with|and|use', Keyword.Reserved),
(r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
(r'record\b', Keyword.Reserved, ('record_def')),
(r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'),
include('root'),
],
'array_def': [
(r';', Punctuation, '#pop'),
(r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)),
include('root'),
],
'record_def': [
(r'end record', Keyword.Reserved, '#pop'),
include('root'),
],
'import': [
(r'[\w.]+', Name.Namespace, '#pop'),
default('#pop'),
],
'formal_part': [
(r'\)', Punctuation, '#pop'),
(r'\w+', Name.Variable),
(r',|:[^=]', Punctuation),
(r'(in|not|null|out|access)\b', Keyword.Reserved),
include('root'),
],
'package': [
('body', Keyword.Declaration),
(r'is\s+new|renames', Keyword.Reserved),
('is', Keyword.Reserved, '#pop'),
(';', Punctuation, '#pop'),
(r'\(', Punctuation, 'package_instantiation'),
(r'([\w.]+)', Name.Class),
include('root'),
],
'package_instantiation': [
(r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)),
(r'[\w.\'"]', Text),
(r'\)', Punctuation, '#pop'),
include('root'),
],
}
| apache-2.0 | 5,213,161,020,567,336,000 | 49.700311 | 88 | 0.49879 | false |
JustArchi/program-y | src/programy/dialog.py | 2 | 7300 | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
class Sentence(object):
def __init__(self, text: str=None, split_chars: str=" "):
self._words = self._split_into_words(text, split_chars)
self._response = None
self._matched_context = None
@property
def words(self):
return self._words
def append_word(self, word):
self._words.append(word)
def append_sentence(self, sentence):
for word in sentence._words:
self._words.append(word)
@property
def response(self):
return self._response
@response.setter
def response(self, text: str):
self._response = text
@property
def matched_context(self):
return self._matched_context
@matched_context.setter
def matched_context(self, context):
self._matched_context = context
def num_words(self):
return len(self.words)
def word(self, num: int):
if num < self.num_words():
return self.words[num]
else:
raise Exception("Num word array violation !")
def words_from_current_pos(self, current_pos: int):
if len(self._words) > 0:
return " ".join(self._words[current_pos:])
else:
# return ""
raise Exception("Num word array violation !")
def text(self):
return " ".join(self._words)
def _split_into_words(self, sentence, split_chars: str):
if sentence is None:
return []
else:
sentence = sentence.strip()
if len(sentence) == 0:
return []
else:
return sentence.split(split_chars)
class Question(object):
@staticmethod
def create_from_text(text: str, sentence_split_chars: str=".", word_split_chars: str=" "):
question = Question()
question._split_into_sentences(text, sentence_split_chars, word_split_chars)
return question
@staticmethod
def create_from_sentence(sentence: Sentence):
question = Question()
question.sentences.append(sentence)
return question
@staticmethod
def create_from_question(question):
new_question = Question()
for each_sentence in question.sentences:
new_question.sentences.append(each_sentence)
return new_question
def __init__(self):
self._sentences = []
self._predicates = {}
@property
def sentences(self):
return self._sentences
def set_predicate(self, name: str, value: str):
self._predicates[name] = value
def predicate(self, name: str):
if name in self._predicates:
return self._predicates[name]
else:
return None
def sentence(self, num: int):
if num < len(self._sentences):
return self._sentences[num]
else:
raise Exception("Num sentence array violation !")
def current_sentence(self):
if len(self._sentences) == 0:
raise Exception("Num sentence array violation !")
else:
return self._sentences[-1]
def previous_sentence(self, num):
if len(self._sentences) < num:
raise Exception("Num sentence array violation !")
else:
return self._sentences[len(self._sentences)-num]
def combine_sentences(self):
return ". ".join([sentence.text() for sentence in self._sentences])
def combine_answers(self):
return ". ".join([sentence.response for sentence in self.sentences if sentence.response is not None])
def _split_into_sentences(self, text: str, sentence_split_chars: str, word_split_chars: str):
if text is not None and len(text.strip()) > 0:
self._sentences = []
all_sentences = text.split(sentence_split_chars)
for each_sentence in all_sentences:
self._sentences.append(Sentence(each_sentence, word_split_chars))
#
# A Conversation is made up of questions, each question is made up of sentences
#
class Conversation(object):
def __init__(self, clientid: str, bot: object, max_histories=100):
self._bot = bot
self._clientid = clientid
self._questions = []
self._max_histories = max_histories
self._predicates = {}
self._predicates['topic'] = '*'
@property
def bot(self):
return self._bot
@property
def clientid(self):
return self._clientid
@property
def questions(self):
return self._questions
# 1 indexed, not 0 indexed, 1st question is nth_question(1)
def nth_question(self, num: int):
if num <= len(self._questions):
question_num = len(self._questions)-num
return self._questions[question_num]
else:
raise Exception("Invalid question index")
def current_question(self):
if len(self._questions) > 0:
return self._questions[-1]
else:
raise Exception("Invalid question index")
def all_sentences(self):
sentences = []
for question in self._questions:
for sentence in question.sentences:
sentences.append(sentence.text())
return sentences
def nth_sentence(self, num: int):
sentences = self.all_sentences()
if num <= len(sentences):
return sentences[len(sentences)-num]
else:
raise Exception("Invalid sentence index")
def set_predicate(self, name: str, value: str):
if name == 'topic':
if value == "":
value = '*'
self._predicates[name] = value
def predicate(self, name: str):
if self._predicates is not None:
if name in self._predicates:
return self._predicates[name]
return None
def record_dialog(self, question: Question):
if len(self._questions) == self._max_histories:
logging.info("Conversation history at max [%d], removing oldest", self._max_histories)
self._questions.remove(self._questions[0])
self._questions.append(question)
def pop_dialog(self):
if len(self._questions) > 0:
self._questions.pop()
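# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. The
# Conversation constructor only stores the ``bot`` argument, so ``None`` stands
# in for a real bot instance here; "client-1" is an arbitrary client id.
if __name__ == '__main__':
    question = Question.create_from_text("Hello there. How are you")
    question.sentences[0].response = "Hi"
    conversation = Conversation("client-1", None, max_histories=10)
    conversation.record_dialog(question)
    print(conversation.nth_question(1).combine_sentences())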
| mit | 4,281,058,491,372,725,000 | 31.15859 | 126 | 0.619726 | false |
betterlife/flask-psi | tests/views/sales_order_test.py | 2 | 5799 | import random
from flask import url_for
from six import iteritems
from tests import fixture
from psi.app import const
from psi.app.utils import db_util, calc_inline_field_name
from tests.base_test_case import BaseTestCase
from tests.object_faker import object_faker as of
class TestSalesOrderPages(BaseTestCase):
def test_sales_order_list_create_page(self):
user, password = of.user(
role_names=['direct_sales_order_create', 'direct_sales_order_view']
)
db_util.save_objects_commit(user)
fixture.login_user(self.test_client, user.email, password)
self.assertPageRendered(endpoint=url_for('salesorder.index_view'))
self.assertPageRendered(endpoint=url_for('salesorder.create_view'))
fixture.logout_user(self.test_client)
# The follow test case will fail according to bug BE-325
# user, password = of.user(
# role_names = ['franchise_sales_order_create', 'franchise_sales_order_view']
# )
# db_util.save_objects_commit(user)
# fixture.login_user(self.test_client, user.email, password)
# self.assertPageRendered(endpoint=url_for('salesorder.index_view'))
# self.assertPageRendered(endpoint=url_for('salesorder.create_view'))
def test_update_sales_order(self):
data, expect = self.create_sales_order(status_key=const.SO_CREATED_STATUS_KEY)
customer, delivered, total = expect[0], expect[1], float(expect[4])
self.assertPageRendered(expect_contents=expect,
endpoint=self.edit_endpoint(view='salesorder'))
new_remark = of.faker.text(max_nb_chars=50)
new_logistic_amount = random.randint(0, 100)
new_order_date = of.faker.date_time_this_year()
new_expect = [customer, delivered, str(new_logistic_amount), str(total),
new_order_date.strftime("%Y-%m-%d"), new_remark]
data['logistic_amount'] = new_logistic_amount
data['order_date'] = new_order_date
data['remark'] = new_remark
new_data = dict()
for k,v in iteritems(data):
if k.startswith('lines-') is False:
new_data[k] = v
self.assertPageRendered(method=self.test_client.post,
endpoint=url_for('salesorder.edit_view',
url=url_for(
'salesorder.index_view'),
id=1),
data=data, expect_contents=new_expect)
def test_delete_sales_order(self):
data, expect = self.create_sales_order(status_key=const.SO_CREATED_STATUS_KEY)
self.assertDeleteSuccessful(endpoint=url_for('salesorder.delete_view',
id= 1,
url=url_for('salesorder.index_view')),
deleted_data=expect)
def test_create_sales_order(self):
self.create_sales_order(status_key=const.SO_DELIVERED_STATUS_KEY)
def create_sales_order(self, status_key):
from psi.app.models import EnumValues
from psi.app.services.purchase_order import PurchaseOrderService
user, password = of.user(
role_names=['direct_sales_order_create',
'direct_sales_order_view',
'direct_sales_order_edit',
'direct_sales_order_delete']
)
db_util.save_objects_commit(user)
fixture.login_as_admin(self.test_client)
fixture.login_user(self.test_client, user.email, password)
direct_po = EnumValues.get(const.DIRECT_PO_TYPE_KEY)
po = of.purchase_order(number_of_line=2, type=direct_po,
creator=user)
po.status = EnumValues.get(const.PO_ISSUED_STATUS_KEY)
fixture.login_as_admin(self.test_client)
l_e, g_e, recv = PurchaseOrderService.create_expense_receiving(po)
customer = of.customer(creator=user)
db_util.save_objects_commit(po, l_e, g_e, recv, customer)
fixture.logout_user(self.test_client)
fixture.login_user(self.test_client, user.email, password)
order_status = EnumValues.get(status_key)
order_date = of.faker.date_time_this_year()
logistic_amount = random.randint(0, 100)
remark = of.faker.text(max_nb_chars=50)
data = dict(customer=customer.id, status=order_status.id,
order_date=order_date, logistic_amount=logistic_amount,
remark=remark)
total, data = self.prepare_so_lines_data_from_po(po, data)
expect = [customer.name, order_status.display,
order_date.strftime("%Y-%m-%d"), remark, str(total)]
self.assertPageRendered(method=self.test_client.post, data=data,
endpoint=self.create_endpoint(view='salesorder'),
expect_contents=expect)
return data, expect
def prepare_so_lines_data_from_po(self, po, data):
total = 0
po_lines = po.lines
for i in range(2):
data[calc_inline_field_name(i, 'id')] = i + 1
data[calc_inline_field_name(i, 'product')] = po_lines[
i].product.id
data[calc_inline_field_name(i, 'unit_price')] = po_lines[
i].product.retail_price
data[calc_inline_field_name(i, 'quantity')] = random.randint(1,
20)
total += data[calc_inline_field_name(i, 'unit_price')] * data[
calc_inline_field_name(i, 'quantity')]
return total, data
| mit | -1,449,102,264,506,841,900 | 48.144068 | 91 | 0.580617 | false |
rmaz/buck | programs/buck_package.py | 4 | 8319 | # Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import contextlib
import errno
import json
import os
import shutil
import stat
import tempfile
import pkg_resources
import file_locks
from buck_tool import BuckTool, Resource
SERVER = Resource("buck_server")
BOOTSTRAPPER = Resource("bootstrapper_jar")
BUCK_BINARY_HASH = Resource("buck_binary_hash")
PEX_ONLY_EXPORTED_RESOURCES = [Resource("external_executor_jar")]
MODULES_DIR = "buck-modules"
MODULES_RESOURCES_DIR = "buck-modules-resources"
@contextlib.contextmanager
def closable_named_temporary_file(*args, **kwargs):
"""
Due to a bug in python (https://bugs.python.org/issue14243), we need to be able to close() the
temporary file without deleting it.
"""
fp = tempfile.NamedTemporaryFile(*args, delete=False, **kwargs)
try:
with fp:
yield fp
finally:
try:
os.remove(fp.name)
except OSError as e:
# It's possible this fails because of a race with another buck
# instance has removed the entire resource_path, so ignore
# 'file not found' errors.
if e.errno != errno.ENOENT:
raise
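# Editor's note: a hedged usage sketch, not part of the original Buck source.
# The context manager yields a NamedTemporaryFile that can be close()d without
# being deleted, so its contents can still be copied afterwards; removal happens
# in the finally clause once the with-block exits.
def _example_closable_named_temporary_file(destination_path):
    with closable_named_temporary_file(prefix="example" + os.extsep) as outf:
        outf.write(b"example payload")
        outf.close()  # explicitly close first; the file still exists on disk
        shutil.copy(outf.name, destination_path)
    # by this point the temporary file itself has been cleaned up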
class BuckPackage(BuckTool):
def __init__(self, buck_project, buck_reporter):
super(BuckPackage, self).__init__(buck_project, buck_reporter)
self._resource_subdir = None
self._lock_file = None
def _get_package_info(self):
return json.loads(pkg_resources.resource_string(__name__, "buck_package_info"))
def _get_buck_git_commit(self):
return self._get_buck_version_uid()
def _get_resource_dir(self):
if self._use_buckd:
base_dir = self._buck_project.buckd_dir
else:
base_dir = self._tmp_dir
return os.path.join(base_dir, "resources")
def _get_resource_subdir(self):
def try_subdir(lock_file_dir):
self.__create_dir(lock_file_dir)
lock_file_path = os.path.join(lock_file_dir, file_locks.BUCK_LOCK_FILE_NAME)
lock_file = open(lock_file_path, "a+")
if file_locks.acquire_shared_lock(lock_file):
return lock_file
else:
return None
if self._resource_subdir is None:
buck_version_uid = self._get_buck_version_uid()
resource_dir = self._get_resource_dir()
subdir = os.path.join(resource_dir, buck_version_uid)
self._lock_file = try_subdir(subdir)
if self._lock_file:
self._resource_subdir = subdir
else:
subdir = tempfile.mkdtemp(dir=resource_dir, prefix=buck_version_uid)
self._lock_file = try_subdir(subdir)
if not self._lock_file:
raise Exception(
"Could not acquire lock in fresh tmp dir: " + subdir
)
self._resource_subdir = subdir
return self._resource_subdir
def __create_dir(self, dir):
try:
os.makedirs(dir)
except OSError as ex:
# Multiple threads may try to create this at the same time, so just swallow the
# error if is about the directory already existing.
if ex.errno != errno.EEXIST:
raise
def _get_resource_lock_path(self):
return os.path.join(self._get_resource_subdir(), file_locks.BUCK_LOCK_FILE_NAME)
def _has_resource(self, resource):
return pkg_resources.resource_exists(__name__, resource.name)
def _get_resource(self, resource):
resource_path = os.path.join(self._get_resource_subdir(), resource.basename)
if not os.path.exists(os.path.dirname(resource_path)):
self.__create_dir(os.path.dirname(resource_path))
if not os.path.exists(resource_path):
self._unpack_resource(resource_path, resource.name, resource.executable)
return resource_path
def _unpack_resource(self, resource_path, resource_name, resource_executable):
if not pkg_resources.resource_exists(__name__, resource_name):
return
if pkg_resources.resource_isdir(__name__, resource_name):
self.__create_dir(resource_path)
for f in pkg_resources.resource_listdir(__name__, resource_name):
if f == "":
# TODO(beng): Figure out why this happens
continue
# TODO: Handle executable resources in directory
self._unpack_resource(
os.path.join(resource_path, f),
os.path.join(resource_name, f),
False,
)
else:
with closable_named_temporary_file(
prefix=resource_path + os.extsep
) as outf:
outf.write(pkg_resources.resource_string(__name__, resource_name))
if resource_executable and hasattr(os, "fchmod"):
st = os.fstat(outf.fileno())
os.fchmod(outf.fileno(), st.st_mode | stat.S_IXUSR)
outf.close()
shutil.copy(outf.name, resource_path)
def _get_extra_java_args(self):
modules_dir = os.path.join(self._resource_subdir, MODULES_DIR)
        module_resources_dir = os.path.join(
            self._resource_subdir, MODULES_RESOURCES_DIR
        )
return [
"-Dbuck.git_dirty=0",
"-Dbuck.path_to_python_dsl=",
"-Dpf4j.pluginsDir={}".format(modules_dir),
"-Dbuck.mode=package",
"-Dbuck.module.resources={}".format(module_resources_dir),
]
def _get_exported_resources(self):
return (
super(BuckPackage, self)._get_exported_resources()
+ PEX_ONLY_EXPORTED_RESOURCES
)
def _get_bootstrap_classpath(self):
return self._get_resource(BOOTSTRAPPER)
def _get_java_classpath(self):
return self._get_resource(SERVER)
def _get_buck_binary_hash(self):
with open(self._get_resource(BUCK_BINARY_HASH), "r") as buck_binary_hash_file:
return buck_binary_hash_file.read().strip()
def _unpack_modules(self):
self._unpack_dir(
MODULES_DIR, os.path.join(self._get_resource_subdir(), MODULES_DIR)
)
self._unpack_dir(
MODULES_RESOURCES_DIR,
os.path.join(self._get_resource_subdir(), MODULES_RESOURCES_DIR),
)
def _unpack_dir(self, resource_dir, dst_dir):
if not pkg_resources.resource_exists(__name__, resource_dir):
raise Exception(
"Cannot unpack directory: {0} doesn't exist in the package".format(
resource_dir
)
)
if not pkg_resources.resource_isdir(__name__, resource_dir):
raise Exception(
"Cannot unpack directory: {0} is not a directory".format(resource_dir)
)
self.__create_dir(dst_dir)
if not os.path.exists(dst_dir):
raise Exception(
"Cannot unpack directory: cannot create directory {0}".format(dst_dir)
)
for resource_file in pkg_resources.resource_listdir(__name__, resource_dir):
resource_path = os.path.join(dst_dir, resource_file)
if os.path.exists(resource_path):
continue
self._unpack_resource(
resource_path, "/".join((resource_dir, resource_file)), False
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._lock_file:
self._lock_file.close()
self._lock_file = None
| apache-2.0 | 8,315,669,822,247,099,000 | 35.169565 | 98 | 0.591417 | false |
seberg/numpy | numpy/typing/tests/data/pass/simple.py | 10 | 2690 | """Simple expression that should pass with mypy."""
import operator
import numpy as np
from typing import Iterable # noqa: F401
# Basic checks
array = np.array([1, 2])
def ndarray_func(x):
# type: (np.ndarray) -> np.ndarray
return x
ndarray_func(np.array([1, 2]))
array == 1
array.dtype == float
# Dtype construction
np.dtype(float)
np.dtype(np.float64)
np.dtype(None)
np.dtype("float64")
np.dtype(np.dtype(float))
np.dtype(("U", 10))
np.dtype((np.int32, (2, 2)))
# Define the arguments on the previous line to prevent bidirectional
# type inference in mypy from broadening the types.
two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")]
np.dtype(two_tuples_dtype)
three_tuples_dtype = [("R", "u1", 2)]
np.dtype(three_tuples_dtype)
mixed_tuples_dtype = [("R", "u1"), ("G", np.unicode_, 1)]
np.dtype(mixed_tuples_dtype)
shape_tuple_dtype = [("R", "u1", (2, 2))]
np.dtype(shape_tuple_dtype)
shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.unicode_, 1)]
np.dtype(shape_like_dtype)
object_dtype = [("field1", object)]
np.dtype(object_dtype)
np.dtype((np.int32, (np.int8, 4)))
# Dtype comparison
np.dtype(float) == float
np.dtype(float) != np.float64
np.dtype(float) < None
np.dtype(float) <= "float64"
np.dtype(float) > np.dtype(float)
np.dtype(float) >= np.dtype(("U", 10))
# Iteration and indexing
def iterable_func(x):
# type: (Iterable) -> Iterable
return x
iterable_func(array)
[element for element in array]
iter(array)
zip(array, array)
array[1]
array[:]
array[...]
array[:] = 0
array_2d = np.ones((3, 3))
array_2d[:2, :2]
array_2d[..., 0]
array_2d[:2, :2] = 0
# Other special methods
len(array)
str(array)
array_scalar = np.array(1)
int(array_scalar)
float(array_scalar)
# currently does not work due to https://github.com/python/typeshed/issues/1904
# complex(array_scalar)
bytes(array_scalar)
operator.index(array_scalar)
bool(array_scalar)
# comparisons
array < 1
array <= 1
array == 1
array != 1
array > 1
array >= 1
1 < array
1 <= array
1 == array
1 != array
1 > array
1 >= array
# binary arithmetic
array + 1
1 + array
array += 1
array - 1
1 - array
array -= 1
array * 1
1 * array
array *= 1
nonzero_array = np.array([1, 2])
array / 1
1 / nonzero_array
float_array = np.array([1.0, 2.0])
float_array /= 1
array // 1
1 // nonzero_array
array //= 1
array % 1
1 % nonzero_array
array %= 1
divmod(array, 1)
divmod(1, nonzero_array)
array ** 1
1 ** array
array **= 1
array << 1
1 << array
array <<= 1
array >> 1
1 >> array
array >>= 1
array & 1
1 & array
array &= 1
array ^ 1
1 ^ array
array ^= 1
array | 1
1 | array
array |= 1
# unary arithmetic
-array
+array
abs(array)
~array
# Other methods
np.array([1, 2]).transpose()
| bsd-3-clause | -5,487,341,036,122,079,000 | 15.30303 | 79 | 0.648327 | false |
testvidya11/ejrf | questionnaire/migrations/0037_auto__add_field_question_is_required.py | 1 | 19129 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.is_required'
db.add_column(u'questionnaire_question', 'is_required',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.is_required'
db.delete_column(u'questionnaire_question', 'is_required')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'answergroup'", 'null': 'True', 'to': "orm['questionnaire.Answer']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_groups'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.dateanswer': {
'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DateField', [], {'null': 'True'})
},
'questionnaire.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionOption']", 'null': 'True'})
},
'questionnaire.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '9', 'decimal_places': '2'})
},
'questionnaire.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'UID': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '6'}),
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short_instruction': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'questionnaire.questiongroup': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroup'},
'allow_multiples': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_group'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}),
'question': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'question_group'", 'symmetrical': 'False', 'to': "orm['questionnaire.Question']"}),
'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'to': "orm['questionnaire.SubSection']"})
},
'questionnaire.questiongrouporder': {
'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroupOrder'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'to': "orm['questionnaire.Question']"}),
'question_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'questionnaire.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'UID': ('django.db.models.fields.CharField', [], {'max_length': '6', 'unique': 'True', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['questionnaire.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.region': {
'Meta': {'object_name': 'Region'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"})
},
'questionnaire.section': {
'Meta': {'ordering': "('order',)", 'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.subsection': {
'Meta': {'ordering': "('order',)", 'object_name': 'SubSection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.supportdocument': {
'Meta': {'object_name': 'SupportDocument'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'support_documents'", 'to': "orm['questionnaire.Questionnaire']"})
},
'questionnaire.textanswer': {
'Meta': {'object_name': 'TextAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Organization']", 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Region']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['questionnaire'] | bsd-3-clause | 2,539,421,261,217,425,400 | 82.173913 | 190 | 0.566679 | false |
baoboa/pizza-lammps | src/pizza.py | 2 | 13330 | #!/usr/local/bin/python -i
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, [email protected], Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# Change log:
# 8/05, Steve Plimpton (SNL): original version
# 12/09, David Hart (SNL): except hook for Tkinter no-display error
# 5/11, David Hart (SNL): began list of excludes for no-display machines
# ToDo list:
# Help strings:
version = "11 Sep 2013"
intro = """
Pizza.py (%s), a toolkit written in Python
type ? for help, CTRL-D to quit
"""
help = """
pizza.py switch arg(s) switch arg(s) ...
-s silent (else print start-up help)
-t log dump raster load only these tools
-x raster rasmol load all tools except these
-f mine.py arg1 arg2 run script file with args
-c "vec = range(100)" run Python command
-q quit (else interactive)
Everything typed at the ">" prompt is a Python command
Additional commands available at ">" prompt:
? print help message
?? one-line for each tool and script
? raster list tool commands or script syntax
?? energy.py full documentation of tool or script
!ls -l shell command
@cd .. cd to a new directory
@log tmp.log log all commands typed so far to file
@run block.py arg1 arg2 run script file with args
@time d = dump("*.dump") time a command
Tools:
"""
# -------------------------------------------------------------------------
# modules needed by pizza.py
import sys, commands, os, string, exceptions, glob, re
from time import clock
# readline not available in all Pythons
try:
import readline
readline_flag = 1
except ImportError, exception:
print "readline option not available"
readline_flag = 0
# create global Tk root if Tkinter is loaded
# used by all tools that do GUIs via Tkinter
nodisplay = False
try:
import Tkinter
tkroot = Tkinter.Tk()
tkroot.withdraw()
except ImportError, exception:
nodisplay = True
pass
except Exception, exception:
nodisplay = True
pass
# -------------------------------------------------------------------------
# error trap that enables special commands at interactive prompt
def trap(type,value,tback):
global argv
# only check SyntaxErrors
if not isinstance(value,exceptions.SyntaxError):
sys.__excepthook__(type,value,tback)
return
# special commands at top level only, not in indented text entry
if value.text[0].isspace():
sys.__excepthook__(type,value,tback)
return
# ? = top-level help
# ?? = one-line description of each tool and script
# ? name = one-line for each tool command or script purpose/syntax
# ?? name = entire documentation for tool or script
# name with no .py suffix is tool, name with .py suffix is script
if value.text[0] == "?":
words = value.text.split()
if len(words) == 1 and words[0] == "?":
print intro[1:] % version
print help[1:]," ",
for tool in tools: print tool,
print
elif len(words) == 1 and words[0] == "??":
for tool in tools:
exec "oneline = oneline_%s" % tool
print "%-11s%s" % (tool,oneline)
print
scripts = []
for dir in PIZZA_SCRIPTS[1:]:
list = glob.glob("%s/*.py" % dir)
list.sort()
scripts += list
for script in scripts:
filename = os.path.basename(script)
lines = open(script,'r').readlines()
flag = 0
for line in lines:
if line.find("Purpose:") >= 0:
flag = 1
break
if flag: doc = line[line.find("Purpose:")+8:]
else: doc = " not available\n"
print "%-20s%s" % (filename,doc),
elif len(words) == 2 and words[0] == "?":
if words[1][-3:] == ".py":
fileflag = 0
for dir in PIZZA_SCRIPTS:
filename = "%s/%s" % (dir,words[1])
if os.path.isfile(filename):
fileflag = 1
lineflag = 0
lines = open(filename,'r').readlines()
for line in lines:
if line.find("# Purpose:") >= 0: print line[2:],
if line.find("# Syntax:") >= 0:
lineflag = 1
break
if not lineflag: print "%s has no Syntax line" % words[1]
else: print line[2:],
break
if not fileflag:
print "%s is not a recognized script" % words[1]
else:
if words[1] in tools:
exec "txt = docstr_%s" % words[1]
txt = re.sub("\n\s*\n","\n",txt)
txt = re.sub("\n .*","",txt)
exec "print oneline_%s" % words[1]
print txt
else:
print "%s is not a recognized tool" % words[1]
elif len(words) == 2 and words[0] == "??":
if words[1][-3:] == ".py":
fileflag = 0
for dir in PIZZA_SCRIPTS:
filename = "%s/%s" % (dir,words[1])
if os.path.isfile(filename):
fileflag = 1
lines = open(filename,'r').readlines()
for line in lines:
if len(line.strip()) == 0: continue
if line[0] == '#': print line,
else: break
break
if not fileflag:
print "%s is not a recognized script" % words[1]
else:
if words[1] in tools:
exec "print oneline_%s" % words[1]
exec "print docstr_%s" % words[1]
else:
print "%s is not a recognized class" % words[1]
return
# shell command like !ls, !ls -l
if value.text[0] == "!":
os.system(value.text[1:])
return
# @ commands = @cd, @log, @run, @time
# for run and time, use namespace in execfile and exec commands
# else variables defined in script/command
# won't be set in top-level Pizza.py
if value.text[0] == "@":
words = value.text.split()
if words[0][1:] == "cd":
os.chdir(words[1])
return
elif words[0][1:] == "log":
if readline_flag == 0:
print "cannot use @log without readline module"
return
f = open(words[1],"w")
print >>f,"# pizza.py log file\n"
nlines = readline.get_current_history_length()
for i in xrange(1,nlines):
print >>f,readline.get_history_item(i)
f.close()
return
elif words[0][1:] == "run":
argv = words[1:]
file = argv[0]
flag = 0
for dir in PIZZA_SCRIPTS:
fullfile = dir + '/' + file
if os.path.exists(fullfile):
flag = 1
print "Executing file:",fullfile
execfile(fullfile,namespace)
break
if not flag: print "Could not find file",file
return
elif words[0][1:] == "time":
cmd = string.join(words[1:])
t1 = clock()
exec cmd in namespace
t2 = clock()
print "CPU time = ",t2-t1
return
# unrecognized command, let system handle error
sys.__excepthook__(type,value,tback)
# -------------------------------------------------------------------------
# process command-line switches
# store scripts and commands in tasks list
silent = 0
yes_tools = []
no_tools = []
tasks = []
quitflag = 0
iarg = 1
while (iarg < len(sys.argv)):
if (sys.argv[iarg][0] != '-'):
print "ERROR: arg is not a switch: %s" % (sys.argv[iarg])
sys.exit()
if (sys.argv[iarg] == "-s"):
silent = 1
iarg += 1
elif (sys.argv[iarg] == "-t"):
jarg = iarg + 1
while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'):
yes_tools.append(sys.argv[jarg])
jarg += 1
iarg = jarg
elif (sys.argv[iarg] == "-x"):
jarg = iarg + 1
while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'):
no_tools.append(sys.argv[jarg])
jarg += 1
iarg = jarg
# allow for "--" as arg to script and not Pizza.py arg
elif (sys.argv[iarg] == "-f"):
jarg = iarg + 1
list = []
while (jarg < len(sys.argv) and
(sys.argv[jarg][0] != '-' or
(len(sys.argv[jarg]) >= 3 and sys.argv[jarg][0:2] == "--"))):
list.append(sys.argv[jarg])
jarg += 1
task = ("script",list)
tasks.append(task)
iarg = jarg
elif (sys.argv[iarg] == "-c"):
jarg = iarg + 1
list = []
while (jarg < len(sys.argv) and sys.argv[jarg][0] != '-'):
list.append(sys.argv[jarg])
jarg += 1
task = ("command",list)
tasks.append(task)
iarg = jarg
elif (sys.argv[iarg] == "-q"):
quitflag = 1
iarg += 1
else:
print "ERROR: unknown switch: %s" % (sys.argv[iarg])
sys.exit()
# print intro message
if not silent: print intro[1:] % version,
# error test on m,x command-line switches
if len(yes_tools) > 0 and len(no_tools) > 0:
print "ERROR: cannot use -t and -x switches together"
sys.exit()
# -------------------------------------------------------------------------
# tools = list of tool names to import
# if -t switch was used, tools = just those files
# else scan for *.py files in all dirs in PIZZA_TOOLS list
# and then Pizza.py src dir (sys.path[0])
if not silent: print "Loading tools ..."
if not silent and nodisplay: print "Display not available ... no GUIs"
try: from DEFAULTS import PIZZA_TOOLS
except: PIZZA_TOOLS = []
PIZZA_TOOLS = map(os.path.expanduser,PIZZA_TOOLS)
PIZZA_TOOLS.append(sys.path[0])
if len(yes_tools) > 0: tools = yes_tools
else:
tools = []
for dir in PIZZA_TOOLS:
tools += glob.glob(dir + "/*.py")
for i in range(len(tools)):
tools[i] = os.path.basename(tools[i])
tools[i] = tools[i][:-3]
# remove duplicate entries, reverse enables removing all but first entry
tools.reverse()
for tool in tools:
while tools.count(tool) > 1: tools.remove(tool)
tools.reverse()
# remove tools in EXCLUDE list and command-line -x list
try: from DEFAULTS import PIZZA_EXCLUDE
except: PIZZA_EXCLUDE = []
for tool in PIZZA_EXCLUDE:
if tool in tools: tools.remove(tool)
for tool in no_tools:
if tool in tools: tools.remove(tool)
# add PIZZA_TOOLS dirs to front of module search path (sys.path)
# import each tool as a Python module and its documentation strings
# restore sys.path
sys.path = PIZZA_TOOLS + sys.path
failed = []
for tool in tools:
#print "loading tool '%s'"%tool
if nodisplay and tool in ['gl']:
failed.append(tool)
continue
try:
exec "from %s import %s" % (tool,tool)
exec "from %s import oneline as oneline_%s" % (tool,tool)
exec "from %s import docstr as docstr_%s" % (tool,tool)
except Exception, exception:
print "%s tool did not load:" % tool
print " ",exception
failed.append(tool)
for dir in PIZZA_TOOLS: sys.path = sys.path[1:]
# final list of tools: remove tools where import failed, sort them
for tool in failed: tools.remove(tool)
tools.sort()
# add current working dir to sys.path so user can import own modules
# cwd isn't in sys.path when Pizza.py is launched
sys.path.insert(0,'')
# -------------------------------------------------------------------------
# PIZZA_SCRIPTS = list of dirs to look in to find scripts
try: from DEFAULTS import PIZZA_SCRIPTS
except: PIZZA_SCRIPTS = []
PIZZA_SCRIPTS = map(os.path.expanduser,PIZZA_SCRIPTS)
PIZZA_SCRIPTS.insert(0,'.')
PIZZA_SCRIPTS.append(sys.path[1][:-3] + "scripts") # path for pizza.py
# run specified script files and commands in order specified
# put arguments in argv so script can access them
# check list of PIZZA_SCRIPTS dirs to find script file
# catch errors so pizza.py will continue even if script is bad
# traceback logic prints where in the script the error occurred
for task in tasks:
if task[0] == "script":
argv = task[1]
file = argv[0]
try:
flag = 0
for dir in PIZZA_SCRIPTS:
fullfile = dir + '/' + file
if os.path.exists(fullfile):
print "Executing file:",fullfile
execfile(fullfile)
flag = 1
break
if not flag: print "Could not find file",file
except StandardError, exception:
(type,value,tback) = sys.exc_info()
print type,value,tback
type = str(type)
type = type[type.find(".")+1:]
print "%s with value: %s" % (type,value)
tback = tback.tb_next
while tback:
print "error on line %d of file %s" % \
(tback.tb_lineno,tback.tb_frame.f_code.co_filename)
tback = tback.tb_next
elif task[0] == "command":
argv = task[1]
cmd = ""
for arg in argv: cmd += arg + " "
exec cmd
# -------------------------------------------------------------------------
# store global namespace
# swap in a new exception handler
# change interactive prompts
namespace = sys.modules['__main__'].__dict__
sys.excepthook = trap
sys.ps1 = "> "
sys.ps2 = ". "
# should now go interactive if launched with "python -i"
# unless -q switch used
if quitflag > 0: sys.exit()
| gpl-2.0 | 808,714,202,700,126,700 | 28.821029 | 75 | 0.565791 | false |
Serag8/Bachelor | google_appengine/lib/django-1.2/django/views/generic/create_update.py | 65 | 8794 | from django.forms.models import ModelFormMetaclass, ModelForm
from django.template import RequestContext, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.xheaders import populate_xheaders
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.utils.translation import ugettext
from django.contrib.auth.views import redirect_to_login
from django.views.generic import GenericViewError
from django.contrib import messages
def apply_extra_context(extra_context, context):
"""
Adds items from extra_context dict to context. If a value in extra_context
is callable, then it is called and the result is added to context.
"""
for key, value in extra_context.iteritems():
if callable(value):
context[key] = value()
else:
context[key] = value
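# Illustrative usage of apply_extra_context (added; not part of the original
# module; the names below are hypothetical):
#
# >>> ctx = {}
# >>> apply_extra_context({'title': 'Home', 'now': datetime.datetime.now}, ctx)
# >>> sorted(ctx) # 'now' was called, 'title' copied as-is
# ['now', 'title']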
def get_model_and_form_class(model, form_class):
"""
Returns a model and form class based on the model and form_class
parameters that were passed to the generic view.
If ``form_class`` is given then its associated model will be returned along
with ``form_class`` itself. Otherwise, if ``model`` is given, ``model``
itself will be returned along with a ``ModelForm`` class created from
``model``.
"""
if form_class:
return form_class._meta.model, form_class
if model:
# The inner Meta class fails if model = model is used for some reason.
tmp_model = model
# TODO: we should be able to construct a ModelForm without creating
# and passing in a temporary inner class.
class Meta:
model = tmp_model
class_name = model.__name__ + 'Form'
form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta})
return model, form_class
raise GenericViewError("Generic view must be called with either a model or"
" form_class argument.")
def redirect(post_save_redirect, obj):
"""
Returns a HttpResponseRedirect to ``post_save_redirect``.
``post_save_redirect`` should be a string, and can contain named string-
substitution place holders of ``obj`` field names.
If ``post_save_redirect`` is None, then redirect to ``obj``'s URL returned
by ``get_absolute_url()``. If ``obj`` has no ``get_absolute_url`` method,
then raise ImproperlyConfigured.
This function is meant to handle the post_save_redirect parameter to the
``create_object`` and ``update_object`` views.
"""
if post_save_redirect:
return HttpResponseRedirect(post_save_redirect % obj.__dict__)
elif hasattr(obj, 'get_absolute_url'):
return HttpResponseRedirect(obj.get_absolute_url())
else:
raise ImproperlyConfigured(
"No URL to redirect to. Either pass a post_save_redirect"
" parameter to the generic view or define a get_absolute_url"
" method on the Model.")
def lookup_object(model, object_id, slug, slug_field):
"""
Return the ``model`` object with the passed ``object_id``. If
``object_id`` is None, then return the object whose ``slug_field``
equals the passed ``slug``. If ``slug`` and ``slug_field`` are not passed,
then raise Http404 exception.
"""
lookup_kwargs = {}
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise GenericViewError(
"Generic view must be called with either an object_id or a"
" slug/slug_field.")
try:
return model.objects.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404("No %s found for %s"
% (model._meta.verbose_name, lookup_kwargs))
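# Illustrative behaviour of lookup_object (added; not part of the original
# module), assuming a hypothetical ``Article`` model with primary key ``id``:
# lookup_object(Article, 42, None, None) filters on {'id__exact': 42},
# lookup_object(Article, None, 'intro', 'slug') filters on
# {'slug__exact': 'intro'}, and passing neither an id nor a slug raises
# GenericViewError.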
def create_object(request, model=None, template_name=None,
template_loader=loader, extra_context=None, post_save_redirect=None,
login_required=False, context_processors=None, form_class=None):
"""
Generic object-creation function.
Templates: ``<app_label>/<model_name>_form.html``
Context:
form
the form for the object
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
new_object = form.save()
msg = ugettext("The %(verbose_name)s was created successfully.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, new_object)
else:
form = form_class()
# Create the template, context, response
if not template_name:
template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
}, context_processors)
apply_extra_context(extra_context, c)
return HttpResponse(t.render(c))
def update_object(request, model=None, object_id=None, slug=None,
slug_field='slug', template_name=None, template_loader=loader,
extra_context=None, post_save_redirect=None, login_required=False,
context_processors=None, template_object_name='object',
form_class=None):
"""
Generic object-update function.
Templates: ``<app_label>/<model_name>_form.html``
Context:
form
the form for the object
object
the original object being edited
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
obj = form.save()
msg = ugettext("The %(verbose_name)s was updated successfully.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, obj)
else:
form = form_class(instance=obj)
if not template_name:
template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
return response
def delete_object(request, model, post_delete_redirect, object_id=None,
slug=None, slug_field='slug', template_name=None,
template_loader=loader, extra_context=None, login_required=False,
context_processors=None, template_object_name='object'):
"""
Generic object-delete function.
The given template will be used to confirm deletion if this view is
fetched using GET; for safety, deletion will only be performed if this
view is POSTed.
Templates: ``<app_label>/<model_name>_confirm_delete.html``
Context:
object
the original object being deleted
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
obj.delete()
msg = ugettext("The %(verbose_name)s was deleted.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return HttpResponseRedirect(post_delete_redirect)
else:
if not template_name:
template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
return response
| mit | 3,720,483,763,845,306,400 | 39.902326 | 114 | 0.645781 | false |
nicjhan/MOM6-examples | tools/analysis/MOM6_ts_annual.py | 6 | 5031 | # Script to plot sub-surface ocean temperature drift.
# Analysis: using newer python 2.7.3
"""
module purge
module use -a /home/fms/local/modulefiles
module load gcc
module load netcdf/4.2
module load python/2.7.3
"""
import os
import math
import numpy as np
from numpy import ma
from netCDF4 import Dataset, MFDataset, num2date, date2num
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# -----------------------------------------------------------------------------
# Function to convert from page coordinates to non-dimensional coordinates
def page_to_ndc( panel, page ):
if len(panel) == 4:
ndc = [ 0.0, 0.0, 0.0, 0.0 ]
ndc[0] = (panel[0]-page[0])/(page[2]-page[0])
ndc[1] = (panel[1]-page[1])/(page[3]-page[1])
ndc[2] = (panel[2]-panel[0])/(page[2]-page[0])
ndc[3] = (panel[3]-panel[1])/(page[3]-page[1])
return ndc
elif len(panel) == 2:
ndc = [ 0.0, 0.0 ]
ndc[0] = (panel[0]-page[0])/(page[2]-page[0])
ndc[1] = (panel[1]-page[1])/(page[3]-page[1])
return ndc
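# Illustrative example (added; not part of the original script): with the
# letter-size page [0.0, 0.0, 612.0, 792.0] used below,
# page_to_ndc([89.0, 99.0, 480.0, 670.0], page) returns roughly
# [0.145, 0.125, 0.639, 0.721], i.e. [left, bottom, width, height] as
# fractions of the page -- the form matplotlib's plt.axes() expects.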
# -----------------------------------------------------------------------------
# Function to discretize colormap with option to white out certain regions
def cmap_discretize(cmap, N, white=None):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, e.g. cm.jet.
N: number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
if type(cmap) == str:
cmap = get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
# White levels?
if white != None:
for i in range(N):
if white[i] > 0.0:
colors_rgba[i,:] = 1.0
# Construct colormap dictionary
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki,key in enumerate(('red','green','blue')):
cdict[key] = [ (indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in xrange(N+1) ]
# Return colormap object.
return matplotlib.colors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
# -----------------------------------------------------------------------------
# Radius of the earth (shared/constants/constants.F90)
radius = 6371.0e3
# Ocean heat capacity (ocean_core/ocean_parameters.F90)
cp_ocean = 3992.10322329649
# Read 'descriptor' and 'years' from external file
f = open("files.txt")
for line in f.readlines():
exec(line.lstrip())
f.close()
model_label = "%s (%s)" % (descriptor,years)
# TMPDIR where input files are located
tmpdir = "./"
# Open input files
#fstatic = Dataset(tmpdir+'19000101.ocean_geometry.nc', 'r')
fstatic = Dataset(tmpdir+'ocean_annual.static.nc', 'r')
ftemp = MFDataset(tmpdir+'ocean_annual.*.temp.nc')
fsalt = MFDataset(tmpdir+'ocean_annual.*.salt.nc')
# Time info
time = ftemp.variables["time"]
ntimes = len(time[:])
date = num2date(time,time.units,time.calendar.lower())
year = [d.year for d in date]
time_days = date2num(date,'days since 01-01-0001',time.calendar.lower())
# Grid info
#area = fstatic.variables["Ah"][:]
area = fstatic.variables["area_t"][:]
z = ftemp.variables["zl"][:]
nz = len(z)
# Input variables
temp = ftemp.variables["temp"]
salt = fsalt.variables["salt"]
# Create arrays to hold derived variables
ztemp = ma.array( np.zeros((ntimes,nz), 'float64'), mask=True )
zsalt = ma.array( np.zeros((ntimes,nz), 'float64'), mask=True )
# Loop over time
#for itime in range(ntimes):
for itime in range(1):
# Compute vertical profile of temperature
tmp = temp[itime,:,:,:]
contmp = salt[itime,:,:,:]
for iz in range(nz):
ztemp[itime,iz] = ma.average(tmp[iz,:,:], weights=area)
zsalt[itime,iz] = ma.average(contmp[iz,:,:], weights=area)
# Transpose for compatibility with contour plots
ztemp = ztemp.transpose()
zsalt = zsalt.transpose()
# Close files
fstatic.close()
ftemp.close()
fsalt.close()
# -----------------------------------------------------------------------------
# Create plot
# Specify plots position in points: [left bottom right top]
page = [ 0.0, 0.0, 612.0, 792.0 ] # corresponding to papertype='letter'
plot1a = [ 89.0, 497.0, 480.0, 670.0 ]
plot1b = [ 89.0, 324.0, 480.0, 497.0 ]
cbar = [ 506.0, 324.0, 531.0, 670.0 ]
plot2 = [ 89.0, 99.0, 480.0, 272.0 ]
plot = [ 89.0, 99.0, 480.0, 670.0 ]
#plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.dpi'] = 72.0
plt.rcParams['figure.figsize'] = [ (page[2]-page[0])/72.0, (page[3]-page[1])/72.0 ]
fig = plt.figure()
ax1a = plt.axes(page_to_ndc(plot,page))
ax1a.set_ylim(5300,0)
ax1a.set_ylabel('Depth (m)')
ax1a.set_xlabel('Ocean Temp (C)',color='r')
ax1a.plot(ztemp,z,ls='-',color='r')
ax1b = ax1a.twiny()
ax1b.set_xlabel('Ocean Salinity (PSU)',color='b')
ax1b.plot(zsalt,z,ls='-',color='b')
# Figure title
xy = page_to_ndc([280.0,730.0],page)
fig.text(xy[0],xy[1],model_label,ha="center",size="x-large")
# Save figure
fig.savefig("ocean_temp.ps")
| gpl-3.0 | -1,526,006,435,967,925,500 | 27.913793 | 98 | 0.601272 | false |
agmscode/agms_python | agms/invoicing.py | 1 | 1824 | from __future__ import absolute_import
from agms.agms import Agms
from agms.request.invoicing_request import InvoicingRequest
from agms.response.invoicing_response import InvoicingResponse
from agms.exception.invalid_request_exception import InvalidRequestException
class Invoicing(Agms):
"""
A class representing AGMS Invoice objects.
"""
def __init__(self):
self.op = None
self._api_url = 'https://gateway.agms.com/roxapi/AGMS_BillPay.asmx'
self._requestObject = InvoicingRequest
self._responseObject = InvoicingResponse
def customer(self, params):
self.op = 'RetrieveCustomerIDList'
self._reset_parameters()
for param, config in params:
self._set_parameter(param, config)
self._execute()
return self.response.to_array()
def invoice(self, params):
self.op = 'RetrieveInvoices'
self._reset_parameters()
for param, config in params:
self._set_parameter(param, config)
self._execute()
return self.response.to_array()
def submit(self, params):
self.op = 'SubmitInvoice'
self._reset_parameters()
for param, config in params:
self._set_parameter(param, config)
self._execute()
return self.response.to_array()
def _execute(self):
if self.op == 'RetrieveCustomerIDList':
self._do_connect('RetrieveCustomerIDList', 'RetrieveCustomerIDListResponse')
elif self.op == 'RetrieveInvoices':
self._do_connect('RetrieveInvoices', 'RetrieveInvoicesResponse')
elif self.op == 'SubmitInvoice':
self._do_connect('SubmitInvoice', 'SubmitInvoiceResponse')
else:
raise InvalidRequestException('Invalid request to Invoicing API ' + self.op) | mit | -4,166,055,359,678,810,600 | 34.784314 | 88 | 0.651316 | false |
conejoninja/plugin.video.pelisalacarta | pelisalacarta/channels/filmpertutti.py | 5 | 5117 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for piratestreaming
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "filmpertutti"
__category__ = "F"
__type__ = "generic"
__title__ = "filmpertutti"
__language__ = "IT"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.filmpertutti mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Ultimi film inseriti", action="peliculas", url="http://www.filmpertutti.co/category/film/"))
itemlist.append( Item(channel=__channel__, title="Categorie film", action="categorias", url="http://www.filmxtutti.co/"))
itemlist.append( Item(channel=__channel__, title="Serie TV" , action="peliculas", url="http://www.filmpertutti.co/category/serie-tv/"))
itemlist.append( Item(channel=__channel__, title="Anime Cartoon Italiani", action="peliculas", url="http://www.filmpertutti.co/category/anime-cartoon-italiani/"))
itemlist.append( Item(channel=__channel__, title="Anime Cartoon Sub-ITA", action="peliculas", url="http://www.filmpertutti.co/category/anime-cartoon-sub-ita/"))
itemlist.append( Item(channel=__channel__, title="Cerca...", action="search"))
return itemlist
def categorias(item):
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data,'<div class="new_home_bottom_link">(.*?)</div>')
# Extract the entries (folders)
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=__channel__, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist
def search(item,texto):
logger.info("[filmpertutti.py] "+item.url+" search "+texto)
item.url = "http://www.filmpertutti.eu/search/"+texto
try:
return peliculas(item)
# Catch the exception so the global search is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def peliculas(item):
logger.info("pelisalacarta.filmpertutti peliculas")
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
# Extract the entries (folders)
'''
<div class="xboxcontent">
<h2><a href="http://filmpertutti.tv/il-padrino-di-chinatown/" rel="bookmark" title="Il padrino di Chinatown" target=""><img width="192" height="262" src="http://filmpertutti.tv/wp-content/uploads/2012/06/IlpadrinodiChinatown.jpeg" class="attachment-post-thumbnail wp-post-image" alt="IlpadrinodiChinatown" title="IlpadrinodiChinatown"> Il padrino di Chinatown </a> </h2>
<p> ... </p>
</div>
'''
patron = '<div class="xboxcontent">\s*'
patron += '<h2><a href="?([^>"]+)"?.*?title="?([^>"]+)"?.*?<img.*?src="([^>"]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle=scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming",""))
if scrapedtitle.startswith("Link to "):
scrapedtitle = scrapedtitle[8:]
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
# Extract the paginator
patronvideos = '<a href="([^"]+)" >Avanti</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
scrapedurl = urlparse.urljoin(item.url,matches[0])
itemlist.append( Item(channel=__channel__, action="peliculas", title="Next Page >>" , url=scrapedurl , folder=True) )
return itemlist
def test():
from servers import servertools
# mainlist
mainlist_items = mainlist(Item())
# Consider the channel good if any of the "Novedades" videos returns mirrors
novedades_items = peliculas(mainlist_items[0])
bien = False
for novedades_item in novedades_items:
mirrors = servertools.find_video_items( item=novedades_item )
if len(mirrors)>0:
bien = True
break
return bien
| gpl-3.0 | 773,171,514,831,139,100 | 40.934426 | 402 | 0.648358 | false |
sergiocorato/project-service | project_recalculate/models/project_project.py | 6 | 3161 | # -*- coding: utf-8 -*-
# See README.rst file on addon root folder for license details
from openerp import models, fields, api, _
from openerp.fields import DATE_LENGTH
from openerp.exceptions import Warning
class ProjectProject(models.Model):
_inherit = 'project.project'
calculation_type = fields.Selection(
[('date_begin', 'Date begin'),
('date_end', 'Date end')],
string='Calculation type', default=False,
help='How to calculate tasks, with date start or date end references. '
'If not set, "Recalculate project" button is disabled.')
def _start_end_dates_prepare(self):
"""
Prepare project start or end date, looking into tasks list
and depending on project calculation_type
- if calculation_type == 'date_begin':
project end date = latest date from tasks end dates
- if calculation_type == 'date_end':
project start date = earliest date from tasks start dates
NOTE: Do not perform any write operations to DB
"""
vals = {}
self.ensure_one()
if not self.tasks:
return vals
from_string = fields.Datetime.from_string
# Here we consider all project task, the ones in a stage with
# include_in_recalculate = False and the ones with
# include_in_recalculate = True
start_task = min(self.tasks,
key=lambda t: from_string(t.date_start or t.date_end))
end_task = max(self.tasks,
key=lambda t: from_string(t.date_end or t.date_start))
# Assign min/max dates if available
if self.calculation_type == 'date_begin' and end_task.date_end:
vals['date'] = end_task.date_end[:DATE_LENGTH]
if self.calculation_type == 'date_end' and start_task.date_start:
vals['date_start'] = start_task.date_start[:DATE_LENGTH]
return vals
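# Illustrative example (added; not part of the original module): with
# calculation_type == 'date_begin' and tasks ending on 2015-03-10 and
# 2015-04-02, the latest task end date wins and the method returns
# {'date': '2015-04-02'}; with calculation_type == 'date_end' it instead
# returns {'date_start': <earliest task start date>}.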
@api.multi
def project_recalculate(self):
"""
Recalculate project tasks start and end dates.
After that, recalculate new project start or end date
"""
for project in self:
if not project.calculation_type:
raise Warning(_("Cannot recalculate project because your "
"project don't have calculation type."))
if (project.calculation_type == 'date_begin'
and not project.date_start):
raise Warning(_("Cannot recalculate project because your "
"project don't have date start."))
if (project.calculation_type == 'date_end'
and not project.date):
raise Warning(_("Cannot recalculate project because your "
"project don't have date end."))
if project.calculation_type != 'none':
for task in project.tasks:
task.task_recalculate()
vals = project._start_end_dates_prepare()
if vals:
project.write(vals)
return True
| agpl-3.0 | -8,846,634,485,652,405,000 | 42.30137 | 79 | 0.57292 | false |
kenorb-contrib/BitTorrent | twisted/web/distrib.py | 3 | 9522 | # -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Distributed web servers.
This is going to have to be refactored so that argument parsing is done
by each subprocess and not by the main web server (i.e. GET, POST etc.).
"""
# System Imports
import types, os, copy, string, cStringIO
if (os.sys.platform != 'win32') and (os.name != 'java'):
import pwd
# Twisted Imports
from twisted.spread import pb
from twisted.web import http
from twisted.python import log
from twisted.persisted import styles
from twisted.web.woven import page
from twisted.internet import address, reactor
# Sibling Imports
import resource
import server
import error
import html
import static
from server import NOT_DONE_YET
class _ReferenceableProducerWrapper(pb.Referenceable):
def __init__(self, producer):
self.producer = producer
def remote_resumeProducing(self):
self.producer.resumeProducing()
def remote_pauseProducing(self):
self.producer.pauseProducing()
def remote_stopProducing(self):
self.producer.stopProducing()
class Request(pb.RemoteCopy, server.Request):
def setCopyableState(self, state):
for k in 'host', 'client':
tup = state[k]
addrdesc = {'INET': 'TCP', 'UNIX': 'UNIX'}[tup[0]]
addr = {'TCP': lambda: address.IPv4Address(addrdesc,
tup[1], tup[2],
_bwHack='INET'),
'UNIX': lambda: address.UNIXAddress(tup[1])}[addrdesc]()
state[k] = addr
pb.RemoteCopy.setCopyableState(self, state)
# Emulate the local request interface --
self.content = cStringIO.StringIO(self.content_data)
self.write = self.remote.remoteMethod('write')
self.finish = self.remote.remoteMethod('finish')
self.setHeader = self.remote.remoteMethod('setHeader')
self.addCookie = self.remote.remoteMethod('addCookie')
self.setETag = self.remote.remoteMethod('setETag')
self.setResponseCode = self.remote.remoteMethod('setResponseCode')
self.setLastModified = self.remote.remoteMethod('setLastModified')
def registerProducer(self, producer, streaming):
self.remote.callRemote("registerProducer",
_ReferenceableProducerWrapper(producer),
streaming).addErrback(self.fail)
def unregisterProducer(self):
self.remote.callRemote("unregisterProducer").addErrback(self.fail)
def fail(self, failure):
log.err(failure)
pb.setCopierForClass(server.Request, Request)
class Issue:
def __init__(self, request):
self.request = request
def finished(self, result):
if result != NOT_DONE_YET:
assert isinstance(result, types.StringType),\
"return value not a string"
self.request.write(result)
self.request.finish()
def failed(self, failure):
#XXX: Argh. FIXME.
failure = str(failure)
self.request.write(
error.ErrorPage(http.INTERNAL_SERVER_ERROR,
"Server Connection Lost",
"Connection to distributed server lost:" +
html.PRE(failure)).
render(self.request))
self.request.finish()
log.msg(failure)
class ResourceSubscription(resource.Resource):
isLeaf = 1
waiting = 0
def __init__(self, host, port):
resource.Resource.__init__(self)
self.host = host
self.port = port
self.pending = []
self.publisher = None
def __getstate__(self):
"""Get persistent state for this ResourceSubscription.
"""
# When I unserialize,
state = copy.copy(self.__dict__)
# Publisher won't be connected...
state['publisher'] = None
# I won't be making a connection
state['waiting'] = 0
# There will be no pending requests.
state['pending'] = []
return state
def connected(self, publisher):
"""I've connected to a publisher; I'll now send all my requests.
"""
log.msg('connected to publisher')
publisher.broker.notifyOnDisconnect(self.booted)
self.publisher = publisher
self.waiting = 0
for request in self.pending:
self.render(request)
self.pending = []
def notConnected(self, msg):
"""I can't connect to a publisher; I'll now reply to all pending
requests.
"""
log.msg("could not connect to distributed web service: %s" % msg)
self.waiting = 0
self.publisher = None
for request in self.pending:
request.write("Unable to connect to distributed server.")
request.finish()
self.pending = []
def booted(self):
self.notConnected("connection dropped")
def render(self, request):
"""Render this request, from my server.
This will always be asynchronous, and therefore return NOT_DONE_YET.
It spins off a request to the pb client, and either adds it to the list
of pending issues or requests it immediately, depending on if the
client is already connected.
"""
if not self.publisher:
self.pending.append(request)
if not self.waiting:
self.waiting = 1
bf = pb.PBClientFactory()
timeout = 10
if self.host == "unix":
reactor.connectUNIX(self.port, bf, timeout)
else:
reactor.connectTCP(self.host, self.port, bf, timeout)
d = bf.getRootObject()
d.addCallbacks(self.connected, self.notConnected)
else:
i = Issue(request)
self.publisher.callRemote('request', request).addCallbacks(i.finished, i.failed)
return NOT_DONE_YET
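# Illustrative wiring (added; not part of the original module; names and the
# port are hypothetical): a front-end server typically serves a subscription
# as its resource tree, e.g.
#
# from twisted.web import server, distrib
# subscription = distrib.ResourceSubscription('localhost', 8789)
# site = server.Site(subscription)
#
# while the publishing process exposes distrib.ResourcePublisher(real_site)
# over PB on that port.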
class ResourcePublisher(pb.Root, styles.Versioned):
def __init__(self, site):
self.site = site
persistenceVersion = 2
def upgradeToVersion2(self):
self.application.authorizer.removeIdentity("web")
del self.application.services[self.serviceName]
del self.serviceName
del self.application
del self.perspectiveName
def getPerspectiveNamed(self, name):
return self
def remote_request(self, request):
res = self.site.getResourceFor(request)
log.msg( request )
return res.render(request)
class UserDirectory(page.Page):
userDirName = 'public_html'
userSocketName = '.twistd-web-pb'
template = """
<html>
<head>
<title>twisted.web.distrib.UserDirectory</title>
<style>
a
{
font-family: Lucida, Verdana, Helvetica, Arial, sans-serif;
color: #369;
text-decoration: none;
}
th
{
font-family: Lucida, Verdana, Helvetica, Arial, sans-serif;
font-weight: bold;
text-decoration: none;
text-align: left;
}
pre, code
{
font-family: "Courier New", Courier, monospace;
}
p, body, td, ol, ul, menu, blockquote, div
{
font-family: Lucida, Verdana, Helvetica, Arial, sans-serif;
color: #000;
}
</style>
<base view="Attributes" model="base" />
</head>
<body>
<h1>twisted.web.distrib.UserDirectory</h1>
<ul view="List" model="directory">
<li pattern="listItem"><a view="Link" /> </li>
</ul>
</body>
</html>
"""
def wmfactory_base(self, request):
return {'href':request.prePathURL()}
def wmfactory_directory(self, request):
m = []
for user in pwd.getpwall():
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell \
= user
realname = string.split(pw_gecos,',')[0]
if not realname:
realname = pw_name
if os.path.exists(os.path.join(pw_dir, self.userDirName)):
m.append({
'href':'%s/'%pw_name,
'text':'%s (file)'%realname
})
twistdsock = os.path.join(pw_dir, self.userSocketName)
if os.path.exists(twistdsock):
linknm = '%s.twistd' % pw_name
m.append({
'href':'%s/'%linknm,
'text':'%s (twistd)'%realname})
return m
def getChild(self, name, request):
if name == '':
return self
td = '.twistd'
if name[-len(td):] == td:
username = name[:-len(td)]
sub = 1
else:
username = name
sub = 0
try:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell \
= pwd.getpwnam(username)
except KeyError:
return error.NoResource()
if sub:
twistdsock = os.path.join(pw_dir, self.userSocketName)
rs = ResourceSubscription('unix',twistdsock)
self.putChild(name, rs)
return rs
else:
path = os.path.join(pw_dir, self.userDirName)
if not os.path.exists(path):
return error.NoResource()
return static.File(path)
| gpl-3.0 | 4,833,659,365,236,759,000 | 30.425743 | 92 | 0.572149 | false |
fengzhanglab/GUIDES | static/data/pre_processed/precompute_guides_cpickle.py | 2 | 4147 | import cPickle as pickle
import json
import os.path
from Queue import PriorityQueue
import re
import doench_score
class GuideRNA():
"""Holder of gRNA information"""
def __init__(self, selected, start, seq, PAM, score, exon_ranking, ensembl_gene, gene_name):
self.start = start
self.seq = seq
self.PAM = PAM
self.score = score
self.exon_ranking = exon_ranking
self.ensembl_gene = ensembl_gene
self.gene_name = gene_name
self.selected = selected
def serialize_for_display(self):
"""Serialize for the way we are returning json"""
return {
"score": self.score,
"start": self.start,
"seq": self.seq,
"PAM": self.PAM,
"selected": self.selected,
}
params = {
"PAM": "NGG",
"protospacer_len": 20,
"prime5": True,
"use_Doench": True,
"quantity": 100
}
modPAM = params["PAM"].upper()
modPAM = modPAM.replace('N', '[ATCG]')
params["modPAM"] = modPAM
params["PAM_len"] = len(params["PAM"])
revcompl = lambda x: ''.join([{'A':'T','C':'G','G':'C','T':'A','N':'N'}[B] for B in x][::-1])
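# Illustrative check (added; not part of the original script): with the
# defaults above params["modPAM"] becomes '[ATCG]GG', so re.finditer matches
# AGG, CGG, GGG and TGG sites, and revcompl('ATGC') == 'GCAT'.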
def gene_exon_file(gene, exon):
filename = gene + "_" + str(exon)
seq_path = os.path.join('../GRCh37_exons/', filename)
if os.path.isfile(seq_path):
with open(seq_path) as infile:
return infile.read()
else:
return None
with open('genes_list.json') as genes_list_file:
genes_list = json.load(genes_list_file)
# gene format: {"ensembl_id": "ENSG00000261122.2", "name": "5S_rRNA", "description": ""}
for gene in genes_list:
exon = 0
seq = gene_exon_file(gene["ensembl_id"], exon)
while seq:
q = PriorityQueue()
def process_guide(m, selected, max_queue_size, seq):
PAM_start = m.start()
score = 0
if params["use_Doench"]:
# Doench score requires the 4 before and 6 after 20-mer (gives 30-mer)
mer30 = seq[PAM_start-params["protospacer_len"]-4:PAM_start+params["PAM_len"]+3]
if len(mer30) == 30:
score = doench_score.calc_score(mer30)
protospacer = ""
PAM = ""
if params["prime5"]:
protospacer = seq[PAM_start-params["protospacer_len"]:PAM_start]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
else:
protospacer = seq[PAM_start+params["PAM_len"]:PAM_start+params["PAM_len"]+params["protospacer_len"]]
PAM = seq[PAM_start:PAM_start+params["PAM_len"]]
potential_gRNA = GuideRNA(selected, PAM_start-params["protospacer_len"], protospacer, PAM, score, exon, gene["ensembl_id"], gene["name"])
# If there's enough room, add it, no question.
if q.qsize() < max_queue_size:
q.put(potential_gRNA)
# Otherwise, take higher score
else:
lowest_gRNA = q.get()
if potential_gRNA.score > lowest_gRNA.score:
q.put(potential_gRNA)
else:
q.put(lowest_gRNA)
for m in re.finditer(params["modPAM"], seq):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
process_guide(m, True, params["quantity"], seq)
seq_rc = revcompl(seq)
for m in re.finditer(params["modPAM"], seq_rc):
if params["prime5"] and (m.start() < params["protospacer_len"] or m.start() + params["PAM_len"] > len(seq)):
continue
elif not params["prime5"] and (m.start() + params["PAM_len"] + params["protospacer_len"] > len(seq)):
continue
process_guide(m, True, params["quantity"], seq_rc)
# Pop gRNAs into our 'permanent' storage
gRNAs = []
while not q.empty():
gRNA = q.get()
gRNAs.append(gRNA)
outfile_name = gene["ensembl_id"] + "_" + str(exon) + ".p"
output_path = os.path.join('../GRCh37_guides_cpickle/', outfile_name)
with open(output_path, 'w') as outfile:
pickle.dump(gRNAs, outfile)
# prepare next exon
exon += 1
seq = gene_exon_file(gene["ensembl_id"], exon)
| agpl-3.0 | 772,124,631,386,194,000 | 32.991803 | 145 | 0.591994 | false |
wummel/patool | tests/archives/test_plzip.py | 1 | 1251 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2015 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import ArchiveTest, Content
from .. import needs_program
class TestPlzip (ArchiveTest):
program = 'plzip'
@needs_program(program)
def test_plzip(self):
self.archive_test('t.txt.lz')
self.archive_extract('t.txt.lz', check=Content.Singlefile)
self.archive_create('t.txt.lz', check=Content.Singlefile)
@needs_program('file')
@needs_program(program)
def test_plzip_file(self):
self.archive_test('t.txt.lz.foo')
self.archive_extract('t.txt.lz.foo', check=Content.Singlefile)
| gpl-3.0 | -7,750,703,512,421,245,000 | 35.794118 | 71 | 0.713829 | false |
ldong/vim_youcompleteme | third_party/jedi/test/test_regression.py | 1 | 4806 | """
Unit tests to avoid errors of the past. These are also all tests that didn't
find a good place in any other testing module.
"""
import os
import textwrap
from .helpers import TestCase, cwd_at
import pytest
import jedi
from jedi._compatibility import u
from jedi import Script
from jedi import api
from jedi.evaluate import imports
from jedi.parser import Parser
#jedi.set_debug_function()
class TestRegression(TestCase):
def test_goto_definition_cursor(self):
s = ("class A():\n"
" def _something(self):\n"
" return\n"
" def different_line(self,\n"
" b):\n"
" return\n"
"A._something\n"
"A.different_line"
)
in_name = 2, 9
under_score = 2, 8
cls = 2, 7
should1 = 7, 10
diff_line = 4, 10
should2 = 8, 10
def get_def(pos):
return [d.description for d in Script(s, *pos).goto_definitions()]
in_name = get_def(in_name)
under_score = get_def(under_score)
should1 = get_def(should1)
should2 = get_def(should2)
diff_line = get_def(diff_line)
assert should1 == in_name
assert should1 == under_score
assert should2 == diff_line
self.assertRaises(jedi.NotFoundError, get_def, cls)
@pytest.mark.skip('Skip for now, test case is not really supported.')
@cwd_at('jedi')
def test_add_dynamic_mods(self):
fname = '__main__.py'
api.settings.additional_dynamic_modules = [fname]
# Fictional module that defines a function.
src1 = "def r(a): return a"
# Other fictional modules in another place in the fs.
src2 = 'from .. import setup; setup.r(1)'
imports.load_module(os.path.abspath(fname), src2)
result = Script(src1, path='../setup.py').goto_definitions()
assert len(result) == 1
assert result[0].description == 'class int'
def test_os_nowait(self):
""" github issue #45 """
s = Script("import os; os.P_").completions()
assert 'P_NOWAIT' in [i.name for i in s]
def test_points_in_completion(self):
"""At some point, points were inserted into the completions, this
caused problems, sometimes.
"""
c = Script("if IndentationErr").completions()
assert c[0].name == 'IndentationError'
self.assertEqual(c[0].complete, 'or')
def test_no_statement_parent(self):
source = textwrap.dedent("""
def f():
pass
class C:
pass
variable = f or C""")
defs = Script(source, column=3).goto_definitions()
defs = sorted(defs, key=lambda d: d.line)
self.assertEqual([d.description for d in defs],
['def f', 'class C'])
def test_end_pos(self):
# jedi issue #150
s = u("x()\nx( )\nx( )\nx ( )")
parser = Parser(s)
for i, s in enumerate(parser.module.statements, 3):
for c in s.expression_list():
self.assertEqual(c.execution.end_pos[1], i)
def check_definition_by_marker(self, source, after_cursor, names):
r"""
Find definitions specified by `after_cursor` and check what is found
For example, for the following configuration, you can pass
``after_cursor = 'y)'``.::
function(
x, y)
\
`- You want cursor to be here
"""
source = textwrap.dedent(source)
for (i, line) in enumerate(source.splitlines()):
if after_cursor in line:
break
column = len(line) - len(after_cursor)
defs = Script(source, i + 1, column).goto_definitions()
print(defs)
assert [d.name for d in defs] == names
def test_backslash_continuation(self):
"""
Test that ModuleWithCursor.get_path_until_cursor handles continuation
"""
self.check_definition_by_marker(r"""
x = 0
a = \
[1, 2, 3, 4, 5, 6, 7, 8, 9, x] # <-- here
""", '] # <-- here', ['int'])
# completion in whitespace
s = 'asdfxyxxxxxxxx sds\\\n hello'
assert Script(s, 2, 4).goto_assignments() == []
def test_backslash_continuation_and_bracket(self):
self.check_definition_by_marker(r"""
x = 0
a = \
[1, 2, 3, 4, 5, 6, 7, 8, 9, (x)] # <-- here
""", '(x)] # <-- here', [])
def test_generator(self):
# Did have some problems with the usage of generator completions this
# way.
s = "def abc():\n" \
" yield 1\n" \
"abc()."
assert Script(s).completions()
| gpl-3.0 | 1,023,334,937,124,774,300 | 29.807692 | 78 | 0.542031 | false |
nyuszika7h/youtube-dl | youtube_dl/extractor/vidme.py | 23 | 9894 | from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
parse_iso8601,
url_or_none,
)
class VidmeIE(InfoExtractor):
IE_NAME = 'vidme'
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]{,5})(?:[^\da-zA-Z]|$)'
_TESTS = [{
'url': 'https://vid.me/QNB',
'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
'info_dict': {
'id': 'QNB',
'ext': 'mp4',
'title': 'Fishing for piranha - the easy way',
'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1406313244,
'upload_date': '20140725',
'age_limit': 0,
'duration': 119.92,
'view_count': int,
'like_count': int,
'comment_count': int,
},
}, {
'url': 'https://vid.me/Gc6M',
'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
'info_dict': {
'id': 'Gc6M',
'ext': 'mp4',
'title': 'O Mere Dil ke chain - Arnav and Khushi VM',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1441211642,
'upload_date': '20150902',
'uploader': 'SunshineM',
'uploader_id': '3552827',
'age_limit': 0,
'duration': 223.72,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
# tests uploader field
'url': 'https://vid.me/4Iib',
'info_dict': {
'id': '4Iib',
'ext': 'mp4',
'title': 'The Carver',
'description': 'md5:e9c24870018ae8113be936645b93ba3c',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1433203629,
'upload_date': '20150602',
'uploader': 'Thomas',
'uploader_id': '109747',
'age_limit': 0,
'duration': 97.859999999999999,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
# nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
'url': 'https://vid.me/e/Wmur',
'info_dict': {
'id': 'Wmur',
'ext': 'mp4',
'title': 'naked smoking & stretching',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1430931613,
'upload_date': '20150506',
'uploader': 'naked-yogi',
'uploader_id': '1638622',
'age_limit': 18,
'duration': 653.26999999999998,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}, {
# nsfw, user-disabled
'url': 'https://vid.me/dzGJ',
'only_matching': True,
}, {
# suspended
'url': 'https://vid.me/Ox3G',
'only_matching': True,
}, {
# deleted
'url': 'https://vid.me/KTPm',
'only_matching': True,
}, {
# no formats in the API response
'url': 'https://vid.me/e5g',
'info_dict': {
'id': 'e5g',
'ext': 'mp4',
'title': 'Video upload (e5g)',
'thumbnail': r're:^https?://.*\.jpg',
'timestamp': 1401480195,
'upload_date': '20140530',
'uploader': None,
'uploader_id': None,
'age_limit': 0,
'duration': 483,
'view_count': int,
'like_count': int,
'comment_count': int,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
try:
response = self._download_json(
'https://api.vid.me/videoByUrl/%s' % video_id, video_id)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
response = self._parse_json(e.cause.read(), video_id)
else:
raise
error = response.get('error')
if error:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error), expected=True)
video = response['video']
if video.get('state') == 'deleted':
raise ExtractorError(
'Vidme said: Sorry, this video has been deleted.',
expected=True)
if video.get('state') in ('user-disabled', 'suspended'):
raise ExtractorError(
'Vidme said: This video has been suspended either due to a copyright claim, '
'or for violating the terms of use.',
expected=True)
formats = []
for f in video.get('formats', []):
format_url = url_or_none(f.get('uri'))
if not format_url:
continue
format_type = f.get('type')
if format_type == 'dash':
formats.extend(self._extract_mpd_formats(
format_url, video_id, mpd_id='dash', fatal=False))
elif format_type == 'hls':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'format_id': f.get('type'),
'url': format_url,
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'preference': 0 if f.get('type', '').endswith(
'clip') else 1,
})
if not formats and video.get('complete_url'):
formats.append({
'url': video.get('complete_url'),
'width': int_or_none(video.get('width')),
'height': int_or_none(video.get('height')),
})
self._sort_formats(formats)
title = video['title']
description = video.get('description')
thumbnail = video.get('thumbnail_url')
timestamp = parse_iso8601(video.get('date_created'), ' ')
uploader = video.get('user', {}).get('username')
uploader_id = video.get('user', {}).get('user_id')
age_limit = 18 if video.get('nsfw') is True else 0
duration = float_or_none(video.get('duration'))
view_count = int_or_none(video.get('view_count'))
like_count = int_or_none(video.get('likes_count'))
comment_count = int_or_none(video.get('comment_count'))
return {
'id': video_id,
'title': title or 'Video upload (%s)' % video_id,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_id': uploader_id,
'age_limit': age_limit,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'comment_count': comment_count,
'formats': formats,
}
class VidmeListBaseIE(InfoExtractor):
# Max possible limit according to https://docs.vid.me/#api-Videos-List
_LIMIT = 100
def _entries(self, user_id, user_name):
for page_num in itertools.count(1):
page = self._download_json(
'https://api.vid.me/videos/%s?user=%s&limit=%d&offset=%d'
% (self._API_ITEM, user_id, self._LIMIT, (page_num - 1) * self._LIMIT),
user_name, 'Downloading user %s page %d' % (self._API_ITEM, page_num))
videos = page.get('videos', [])
if not videos:
break
for video in videos:
video_url = video.get('full_url') or video.get('embed_url')
if video_url:
yield self.url_result(video_url, VidmeIE.ie_key())
total = int_or_none(page.get('page', {}).get('total'))
if total and self._LIMIT * page_num >= total:
break
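# Illustrative note (added; not part of the original module): with
# _LIMIT = 100 the requested offsets are 0, 100, 200, ... (page 1 -> offset
# 0, page 2 -> offset 100), and paging stops once _LIMIT * page_num reaches
# the 'total' reported by the API or an empty page is returned.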
def _real_extract(self, url):
user_name = self._match_id(url)
user_id = self._download_json(
'https://api.vid.me/userByUsername?username=%s' % user_name,
user_name)['user']['user_id']
return self.playlist_result(
self._entries(user_id, user_name), user_id,
'%s - %s' % (user_name, self._TITLE))
class VidmeUserIE(VidmeListBaseIE):
IE_NAME = 'vidme:user'
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z_-]{6,})(?!/likes)(?:[^\da-zA-Z_-]|$)'
_API_ITEM = 'list'
_TITLE = 'Videos'
_TESTS = [{
'url': 'https://vid.me/MasakoX',
'info_dict': {
'id': '16112341',
'title': 'MasakoX - %s' % _TITLE,
},
'playlist_mincount': 191,
}, {
'url': 'https://vid.me/unsQuare_netWork',
'only_matching': True,
}]
class VidmeUserLikesIE(VidmeListBaseIE):
IE_NAME = 'vidme:user:likes'
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z_-]{6,})/likes'
_API_ITEM = 'likes'
_TITLE = 'Likes'
_TESTS = [{
'url': 'https://vid.me/ErinAlexis/likes',
'info_dict': {
'id': '6483530',
'title': 'ErinAlexis - %s' % _TITLE,
},
'playlist_mincount': 415,
}, {
'url': 'https://vid.me/Kaleidoscope-Ish/likes',
'only_matching': True,
}]
| unlicense | 7,186,849,340,191,600,000 | 32.538983 | 97 | 0.480089 | false |
bk2204/urwid | urwid/tests/test_util.py | 19 | 6613 | # -*- coding: utf-8 -*-
import unittest
import urwid
from urwid import util
from urwid.compat import B
class CalcWidthTest(unittest.TestCase):
def wtest(self, desc, s, exp):
s = B(s)
result = util.calc_width( s, 0, len(s))
assert result==exp, "%s got:%r expected:%r" % (desc, result, exp)
def test1(self):
util.set_encoding("utf-8")
self.wtest("narrow", "hello", 5)
self.wtest("wide char", '\xe6\x9b\xbf', 2)
self.wtest("invalid", '\xe6', 1)
self.wtest("zero width", '\xcc\x80', 0)
self.wtest("mixed", 'hello\xe6\x9b\xbf\xe6\x9b\xbf', 9)
def test2(self):
util.set_encoding("euc-jp")
self.wtest("narrow", "hello", 5)
self.wtest("wide", "\xA1\xA1\xA1\xA1", 4)
self.wtest("invalid", "\xA1", 1)
class ConvertDecSpecialTest(unittest.TestCase):
def ctest(self, desc, s, exp, expcs):
exp = B(exp)
util.set_encoding('ascii')
c = urwid.Text(s).render((5,))
result = c._text[0]
assert result==exp, "%s got:%r expected:%r" % (desc, result, exp)
resultcs = c._cs[0]
assert resultcs==expcs, "%s got:%r expected:%r" % (desc,
resultcs, expcs)
def test1(self):
self.ctest("no conversion", u"hello", "hello", [(None,5)])
self.ctest("only special", u"£££££", "}}}}}", [("0",5)])
self.ctest("mix left", u"££abc", "}}abc", [("0",2),(None,3)])
self.ctest("mix right", u"abc££", "abc}}", [(None,3),("0",2)])
self.ctest("mix inner", u"a££bc", "a}}bc",
[(None,1),("0",2),(None,2)] )
self.ctest("mix well", u"£a£b£", "}a}b}",
[("0",1),(None,1),("0",1),(None,1),("0",1)] )
class WithinDoubleByteTest(unittest.TestCase):
def setUp(self):
urwid.set_encoding("euc-jp")
def wtest(self, s, ls, pos, expected, desc):
result = util.within_double_byte(B(s), ls, pos)
assert result==expected, "%s got:%r expected: %r" % (desc,
result, expected)
def test1(self):
self.wtest("mnopqr",0,2,0,'simple no high bytes')
self.wtest("mn\xA1\xA1qr",0,2,1,'simple 1st half')
self.wtest("mn\xA1\xA1qr",0,3,2,'simple 2nd half')
self.wtest("m\xA1\xA1\xA1\xA1r",0,3,1,'subsequent 1st half')
self.wtest("m\xA1\xA1\xA1\xA1r",0,4,2,'subsequent 2nd half')
self.wtest("mn\xA1@qr",0,3,2,'simple 2nd half lo')
self.wtest("mn\xA1\xA1@r",0,4,0,'subsequent not 2nd half lo')
self.wtest("m\xA1\xA1\xA1@r",0,4,2,'subsequent 2nd half lo')
def test2(self):
self.wtest("\xA1\xA1qr",0,0,1,'begin 1st half')
self.wtest("\xA1\xA1qr",0,1,2,'begin 2nd half')
self.wtest("\xA1@qr",0,1,2,'begin 2nd half lo')
self.wtest("\xA1\xA1\xA1\xA1r",0,2,1,'begin subs. 1st half')
self.wtest("\xA1\xA1\xA1\xA1r",0,3,2,'begin subs. 2nd half')
self.wtest("\xA1\xA1\xA1@r",0,3,2,'begin subs. 2nd half lo')
self.wtest("\xA1@\xA1@r",0,3,2,'begin subs. 2nd half lo lo')
self.wtest("@\xA1\xA1@r",0,3,0,'begin subs. not 2nd half lo')
def test3(self):
self.wtest("abc \xA1\xA1qr",4,4,1,'newline 1st half')
self.wtest("abc \xA1\xA1qr",4,5,2,'newline 2nd half')
self.wtest("abc \xA1@qr",4,5,2,'newline 2nd half lo')
self.wtest("abc \xA1\xA1\xA1\xA1r",4,6,1,'newl subs. 1st half')
self.wtest("abc \xA1\xA1\xA1\xA1r",4,7,2,'newl subs. 2nd half')
self.wtest("abc \xA1\xA1\xA1@r",4,7,2,'newl subs. 2nd half lo')
self.wtest("abc \xA1@\xA1@r",4,7,2,'newl subs. 2nd half lo lo')
self.wtest("abc @\xA1\xA1@r",4,7,0,'newl subs. not 2nd half lo')
class CalcTextPosTest(unittest.TestCase):
def ctptest(self, text, tests):
text = B(text)
for s,e,p, expected in tests:
got = util.calc_text_pos( text, s, e, p )
assert got == expected, "%r got:%r expected:%r" % ((s,e,p),
got, expected)
def test1(self):
text = "hello world out there"
tests = [
(0,21,0, (0,0)),
(0,21,5, (5,5)),
(0,21,21, (21,21)),
(0,21,50, (21,21)),
(2,15,50, (15,13)),
(6,21,0, (6,0)),
(6,21,3, (9,3)),
]
self.ctptest(text, tests)
def test2_wide(self):
util.set_encoding("euc-jp")
text = "hel\xA1\xA1 world out there"
tests = [
(0,21,0, (0,0)),
(0,21,4, (3,3)),
(2,21,2, (3,1)),
(2,21,3, (5,3)),
(6,21,0, (6,0)),
]
self.ctptest(text, tests)
def test3_utf8(self):
util.set_encoding("utf-8")
text = "hel\xc4\x83 world \xe2\x81\x81 there"
tests = [
(0,21,0, (0,0)),
(0,21,4, (5,4)),
(2,21,1, (3,1)),
(2,21,2, (5,2)),
(2,21,3, (6,3)),
(6,21,7, (15,7)),
(6,21,8, (16,8)),
]
self.ctptest(text, tests)
def test4_utf8(self):
util.set_encoding("utf-8")
text = "he\xcc\x80llo \xe6\x9b\xbf world"
tests = [
(0,15,0, (0,0)),
(0,15,1, (1,1)),
(0,15,2, (4,2)),
(0,15,4, (6,4)),
(8,15,0, (8,0)),
(8,15,1, (8,0)),
(8,15,2, (11,2)),
(8,15,5, (14,5)),
]
self.ctptest(text, tests)
class TagMarkupTest(unittest.TestCase):
mytests = [
("simple one", "simple one", []),
(('blue',"john"), "john", [('blue',4)]),
(["a ","litt","le list"], "a little list", []),
(["mix",('high',[" it ",('ital',"up a")])," little"],
"mix it up a little",
[(None,3),('high',4),('ital',4)]),
([u"££", u"x££"], u"££x££", []),
([B("\xc2\x80"), B("\xc2\x80")], B("\xc2\x80\xc2\x80"), []),
]
def test(self):
for input, text, attr in self.mytests:
restext,resattr = urwid.decompose_tagmarkup( input )
assert restext == text, "got: %r expected: %r" % (restext, text)
assert resattr == attr, "got: %r expected: %r" % (resattr, attr)
def test_bad_tuple(self):
self.assertRaises(urwid.TagMarkupException, lambda:
urwid.decompose_tagmarkup((1,2,3)))
def test_bad_type(self):
self.assertRaises(urwid.TagMarkupException, lambda:
urwid.decompose_tagmarkup(5))
| lgpl-2.1 | -8,733,404,378,098,235,000 | 36.02809 | 78 | 0.490062 | false |
mchrzanowski/ProjectEuler | src/python/Problem106.py | 1 | 1943 | '''
Created on Aug 20, 2012
@author: mchrzanowski
'''
import argparse
import Problem105
import time
def main(n_value):
numbers = [number for number in xrange(1, n_value + 1)]
groups_to_check_for_equality = 0
for subset in Problem105.all_subsets(numbers):
first_subset, second_subset = subset
# because we assume that the second rule has been met,
# we can assume that two differently-sized subsets
# cannot possibly be equal
if len(first_subset) != len(second_subset):
continue
# since the sequence is strictly increasing, no two
# sets of cardinality 1 can be equal.
if len(first_subset) == 1:
continue
# to see whether we need to test for equality,
# we compare the values at the same indices in both subsets.
# if subset A's values are all less than subset B's counterparts,
# then clearly there is no need to test for equality.
smaller = 0
greater = 0
for pair in zip(first_subset, second_subset):
first, second = pair
if first < second:
smaller += 1
else:
greater += 1
if smaller == len(first_subset) or greater == len(first_subset):
continue
# these are the ones that have survived and need further processing.
groups_to_check_for_equality += 1
print "Groups to check for equality: %d" % groups_to_check_for_equality
if __name__ == '__main__':
begin = time.time()
parser = argparse.ArgumentParser(
description="Problem 106. URL: http://projecteuler.net/problem=106")
parser.add_argument('-n', type=int,
help="Number of elements in the set. Corresponds to the " +
"'n' value in the problem")
args = vars(parser.parse_args())
main(args['n'])
end = time.time()
print "Runtime: %f seconds." % (end - begin)
| mit | -677,884,731,423,814,500 | 28.439394 | 76 | 0.606794 | false |
mike-lawrence/actichampy | pycorder/impedance.py | 1 | 18694 | # -*- coding: utf-8 -*-
'''
Impedance Display Module
PyCorder ActiChamp Recorder
------------------------------------------------------------
Copyright (C) 2010, Brain Products GmbH, Gilching
This file is part of PyCorder
PyCorder is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCorder. If not, see <http://www.gnu.org/licenses/>.
------------------------------------------------------------
@author: Norbert Hauser
@date: $Date: 2013-06-07 19:21:40 +0200 (Fr, 07 Jun 2013) $
@version: 1.0
B{Revision:} $LastChangedRevision: 198 $
'''
from PyQt4 import Qwt5 as Qwt
from modbase import *
from res import frmImpedanceDisplay
class IMP_Display(ModuleBase):
''' Display impedance values
'''
def __init__(self, *args, **keys):
''' Constructor
'''
ModuleBase.__init__(self, name="Impedance Display", **keys)
# XML parameter version
# 1: initial version
self.xmlVersion = 1
# set default values
self.params = None
self.data = None
self.dataavailable = False
self.impDialog = None #: Impedance dialog widget
self.range_max = 50 #: Impedance range 0-range_max in KOhm
self.show_values = True #: Show numerical impedance values
def terminate(self):
''' Destructor
'''
# close dialog widget on exit
if self.impDialog != None:
self.impDialog.close()
self.impDialog = None
def setDefault(self):
''' Set all module parameters to default values
'''
self.range_max = 50
self.show_values = True
def process_start(self):
''' Prepare and open impedance dialog if recording mode == IMPEDANCE
'''
# create and show the impedance dialog
if self.params.recording_mode == RecordingMode.IMPEDANCE:
if self.impDialog == None:
# impedance dialog should be always on top
topLevelWidgets = Qt.QApplication.topLevelWidgets()
activeWindow = Qt.QApplication.activeWindow()
if activeWindow:
self.impDialog = DlgImpedance(self, Qt.QApplication.activeWindow())
else:
if len(topLevelWidgets):
self.impDialog = DlgImpedance(self, topLevelWidgets[0])
else:
self.impDialog = DlgImpedance(self)
self.impDialog.setWindowFlags(Qt.Qt.Tool)
self.impDialog.show()
self.impDialog.updateLabels(self.params)
else:
self.impDialog.updateLabels(self.params)
self.sendColorRange()
else:
if self.impDialog != None:
self.impDialog.close()
self.impDialog = None
def process_stop(self):
''' Close impedance dialog
'''
if self.impDialog != None:
self.impDialog.close()
self.impDialog = None
def process_update(self, params):
''' Get channel properties and
propagate parameter update down to all attached receivers
'''
self.params = params
# propagate down
return params
def process_input(self, datablock):
''' Get data from input queue and update display
'''
self.dataavailable = True
self.data = datablock
# nothing to do if not in impedance mode
if datablock.recording_mode != RecordingMode.IMPEDANCE:
return
# check for an outdated impedance structure
if len(datablock.impedances) > 0 or len(datablock.channel_properties) != len(self.params.channel_properties):
raise ModuleError(self._object_name, "outdated impedance structure received!")
if self.impDialog != None:
self.emit(Qt.SIGNAL('update(PyQt_PyObject)'), datablock)
def process_output(self):
''' Put processed data into output queue
'''
if not self.dataavailable:
return None
self.dataavailable = False
return self.data
def getXML(self):
''' Get module properties for XML configuration file
@return: objectify XML element::
e.g.
<IMP_Display instance="0" version="1">
<range_max>50</range_max>
...
</IMP_Display>
'''
E = objectify.E
cfg = E.IMP_Display(E.range_max(self.range_max),
E.show_values(self.show_values),
version=str(self.xmlVersion),
instance=str(self._instance),
module="impedance")
return cfg
def setXML(self, xml):
''' Set module properties from XML configuration file
@param xml: complete objectify XML configuration tree,
module will search for matching values
'''
# search my configuration data
displays = xml.xpath("//IMP_Display[@module='impedance' and @instance='%i']"%(self._instance) )
if len(displays) == 0:
# configuration data not found, leave everything unchanged
return
# we should have only one display instance from this type
cfg = displays[0]
# check version, has to be lower or equal than current version
version = cfg.get("version")
if (version == None) or (int(version) > self.xmlVersion):
self.send_event(ModuleEvent(self._object_name, EventType.ERROR, "XML Configuration: wrong version"))
return
version = int(version)
# get the values
try:
self.range_max = cfg.range_max.pyval
self.show_values = cfg.show_values.pyval
except Exception as e:
self.send_exception(e, severity=ErrorSeverity.NOTIFY)
def sendColorRange(self):
''' Send new impedance color range as ModuleEvent to update ActiCap LED color range
'''
val = tuple([self.range_max / 3.0, self.range_max * 2.0 / 3.0])
self.send_event(ModuleEvent(self._object_name, EventType.COMMAND, info="ImpColorRange",
cmd_value = val))
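# Illustrative note (a sketch, not part of the original module): with the default
# range_max of 50 KOhm the event above carries the threshold tuple
# (50/3.0, 50*2.0/3.0), i.e. roughly (16.7, 33.3) KOhm, presumably the
# boundaries between the green/yellow/red bands used for the LED color range.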
'''
------------------------------------------------------------
IMPEDANCE DIALOG
------------------------------------------------------------
'''
class DlgImpedance(Qt.QDialog, frmImpedanceDisplay.Ui_frmImpedanceDisplay):
''' Impedance display dialog
'''
def __init__(self, module, *args):
''' Constructor
@param module: parent module
'''
apply(Qt.QDialog.__init__, (self,) + args)
self.setupUi(self)
self.module = module
self.params = None # last received parameter block
self.data = None # last received data block
# create table view grid (10x16 eeg electrodes + 1 row for ground electrode)
cc = 10
rc = 16
self.tableWidgetValues.setColumnCount(cc)
self.tableWidgetValues.setRowCount(rc+1)
self.tableWidgetValues.horizontalHeader().setResizeMode(Qt.QHeaderView.Stretch)
self.tableWidgetValues.horizontalHeader().setDefaultAlignment(Qt.Qt.Alignment(Qt.Qt.AlignCenter))
self.tableWidgetValues.verticalHeader().setResizeMode(Qt.QHeaderView.Stretch)
self.tableWidgetValues.verticalHeader().setDefaultAlignment(Qt.Qt.Alignment(Qt.Qt.AlignCenter))
# add ground electrode row
self.tableWidgetValues.setSpan(rc,0,1,cc)
# row headers
rheader = Qt.QStringList()
for r in xrange(rc):
rheader.append("%d - %d"%(r*cc+1, r*cc+cc))
rheader.append("GND")
self.tableWidgetValues.setVerticalHeaderLabels(rheader)
# create cell items
fnt = Qt.QFont()
fnt.setPointSize(8)
for r in xrange(rc):
for c in xrange(cc):
item = Qt.QTableWidgetItem()
item.setTextAlignment(Qt.Qt.AlignCenter)
item.setFont(fnt)
self.tableWidgetValues.setItem(r, c, item)
# GND electrode cell
item = Qt.QTableWidgetItem()
item.setTextAlignment(Qt.Qt.AlignCenter)
item.setFont(fnt)
item.setText("GND")
self.tableWidgetValues.setItem(rc, 0, item)
self.defaultColor = item.backgroundColor()
# set range list
self.comboBoxRange.clear()
self.comboBoxRange.addItem("15")
self.comboBoxRange.addItem("50")
self.comboBoxRange.addItem("100")
self.comboBoxRange.addItem("500")
# set validators
validator = Qt.QIntValidator(self)
validator.setBottom(15)
validator.setTop(500)
self.comboBoxRange.setValidator(validator)
self.comboBoxRange.setEditText(str(self.module.range_max))
# setup color scale
self.linearscale = False
self.scale_engine = Qwt.QwtLinearScaleEngine()
self.scale_interval = Qwt.QwtDoubleInterval(0, self.module.range_max)
self.scale_map = Qwt.QwtLinearColorMap(Qt.Qt.green, Qt.Qt.red)
if self.linearscale:
self.scale_map.addColorStop(0.45, Qt.Qt.yellow)
self.scale_map.addColorStop(0.55, Qt.Qt.yellow)
self.scale_map.setMode(Qwt.QwtLinearColorMap.ScaledColors)
else:
self.scale_map.addColorStop(0.33, Qt.Qt.yellow)
self.scale_map.addColorStop(0.66, Qt.Qt.red)
self.scale_map.setMode(Qwt.QwtLinearColorMap.FixedColors)
self.ScaleWidget.setColorMap(self.scale_interval, self.scale_map)
self.ScaleWidget.setColorBarEnabled(True)
self.ScaleWidget.setColorBarWidth(30)
self.ScaleWidget.setBorderDist(10,10)
# set default values
self.setColorRange(0, self.module.range_max)
self.checkBoxValues.setChecked(self.module.show_values)
# actions
self.connect(self.comboBoxRange, Qt.SIGNAL("editTextChanged(QString)"), self._rangeChanged)
self.connect(self.checkBoxValues, Qt.SIGNAL("stateChanged(int)"), self._showvalues_changed)
self.connect(self.module, Qt.SIGNAL('update(PyQt_PyObject)'), self._updateValues)
def _rangeChanged(self, rrange):
''' SIGNAL range combo box value has changed
@param rrange: new range value in KOhm
'''
# validate range
valid = self.comboBoxRange.validator().validate(rrange,0)[0]
if valid != Qt.QValidator.Acceptable:
return
# use new range
newrange,ok = rrange.toInt()
if ok:
self.setColorRange(0, newrange)
self.module.range_max = newrange
self._updateValues(self.data)
self.module.sendColorRange()
def _showvalues_changed(self, state):
''' SIGNAL show values checkbox state changed
'''
self.module.show_values = (state == Qt.Qt.Checked)
self._updateValues(self.data)
def setColorRange(self, cmin, cmax):
''' Create new color range for the scale widget
'''
self.scale_interval.setMaxValue(cmax)
self.scale_interval.setMinValue(cmin)
self.ScaleWidget.setColorMap(self.scale_interval, self.scale_map)
self.ScaleWidget.setScaleDiv(self.scale_engine.transformation(),
self.scale_engine.divideScale(self.scale_interval.minValue(),
self.scale_interval.maxValue(),
5, 2))
def closeEvent(self, event):
''' Dialog wants to close, send stop request to main window
'''
self.setParent(None)
self.disconnect(self.module, Qt.SIGNAL('update(PyQt_PyObject)'), self._updateValues)
if self.sender() == None:
self.module.send_event(ModuleEvent(self.module._object_name, EventType.COMMAND, "Stop"))
event.accept()
def reject(self):
''' ESC key pressed, dialog wants to close, just ignore it
'''
return
def _setLabelText(self, row, col, text):
item = self.tableWidgetValues.item(row, col)
item.setText(text)
item.setBackgroundColor(Qt.QColor(128,128,128))
item.label = text
def updateLabels(self, params):
''' Update cell labels
'''
# copy channel configuration
self.params = copy.deepcopy(params)
# update cells
cc = self.tableWidgetValues.columnCount()
rc = self.tableWidgetValues.rowCount() - 1
# reset items
for row in xrange(rc):
for col in xrange(cc):
item = self.tableWidgetValues.item(row, col)
item.setText("")
item.label = ""
item.setBackgroundColor(Qt.Qt.white)
# set channel labels
for idx, ch in enumerate(self.params.channel_properties):
if (ch.enable or ch.isReference) and (ch.input > 0) and (ch.input <= rc*cc) and (ch.inputgroup == ChannelGroup.EEG):
row = (ch.input-1) / cc
col = (ch.input-1) % cc
# channel has a reference impedance value?
if self.params.eeg_channels[idx, ImpedanceIndex.REF] == 1:
# prefix the channel name
name = ch.name + " " + ImpedanceIndex.Name[ImpedanceIndex.DATA]
self._setLabelText(row, col, name)
# put the reference values at the following table item, if possible
name = ch.name + " " + ImpedanceIndex.Name[ImpedanceIndex.REF]
row = (ch.input) / cc
col = (ch.input) % cc
self._setLabelText(row, col, name)
else:
self._setLabelText(row, col, ch.name)
def _getValueText(self, impedance):
''' evaluate the impedance value and get the text and color for display
@return: text and color
'''
if impedance > CHAMP_IMP_INVALID:
valuetext = "disconnected"
color = Qt.QColor(128,128,128)
else:
v = impedance / 1000.0
if impedance == CHAMP_IMP_INVALID:
valuetext = "out of range"
else:
valuetext = "%.0f"%(v)
color = self.ScaleWidget.colorMap().color(self.ScaleWidget.colorBarInterval(), v)
return valuetext, color
def _updateValues(self, data):
''' SIGNAL sent from the impedance module to update cell values
@param data: EEG_DataBlock
'''
if data == None:
return
# keep the last data block
self.data = copy.deepcopy(data)
# check for an outdated impedance structure
if len(data.impedances) > 0 or len(data.channel_properties) != len(self.params.channel_properties):
print "outdated impedance structure received!"
return
cc = self.tableWidgetValues.columnCount()
rc = self.tableWidgetValues.rowCount() - 1
# EEG electrodes
gndImpedance = None
impCount = 0
for idx, ch in enumerate(data.channel_properties):
if (ch.enable or ch.isReference) and (ch.input > 0) and (ch.input <= rc*cc) and (ch.inputgroup == ChannelGroup.EEG):
impCount += 1
row = (ch.input-1) / cc
col = (ch.input-1) % cc
item = self.tableWidgetValues.item(row, col)
# channel has a data impedance value?
if self.params.eeg_channels[idx, ImpedanceIndex.DATA] == 1:
# data channel value
value, color = self._getValueText(data.eeg_channels[idx, ImpedanceIndex.DATA])
item.setBackgroundColor(color)
if self.module.show_values:
item.setText("%s\n%s"%(item.label, value))
else:
item.setText(item.label)
# channel has a reference impedance value?
if self.params.eeg_channels[idx, ImpedanceIndex.REF] == 1:
row = (ch.input) / cc
col = (ch.input) % cc
item = self.tableWidgetValues.item(row, col)
# reference channel value
value, color = self._getValueText(data.eeg_channels[idx, ImpedanceIndex.REF])
item.setBackgroundColor(color)
if self.module.show_values:
item.setText("%s\n%s"%(item.label, value))
else:
item.setText(item.label)
# channel has a GND impedance value?
if gndImpedance == None and self.params.eeg_channels[idx, ImpedanceIndex.GND] == 1:
gndImpedance = data.eeg_channels[idx, ImpedanceIndex.GND]
# GND electrode, take the value of the first EEG electrode
item = self.tableWidgetValues.item(rc, 0)
if gndImpedance == None:
item.setText("")
item.setBackgroundColor(Qt.Qt.white)
else:
value, color = self._getValueText(gndImpedance)
item.setBackgroundColor(color)
if self.module.show_values:
item.setText("%s\n%s"%("GND", value))
else:
item.setText("GND")
| gpl-3.0 | -6,931,737,067,982,420,000 | 37.944444 | 128 | 0.55467 | false |
landlab/landlab | tests/grid/test_raster_grid/test_bc_updates.py | 3 | 1905 | import numpy as np
from numpy.testing import assert_array_equal
from landlab import LinkStatus, RasterModelGrid
def test_issue_428_a():
"""Issue #428"""
grid = RasterModelGrid((4, 4))
grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
assert grid.status_at_node[1] == 4
assert grid.status_at_link[4] == 4
assert_array_equal(grid.active_link_dirs_at_node[1], [0, 0, 0, 0])
grid.status_at_node[1] = 1
assert grid.status_at_link[4] == 0
assert_array_equal(grid.active_link_dirs_at_node[1], [0, -1, 0, 0])
def test_issue_428_b():
"""Issue #428"""
grid = RasterModelGrid((4, 4))
z = np.ones(grid.number_of_nodes)
z[grid.nodes_at_bottom_edge] = -9999.0
z[grid.nodes_at_left_edge] = -9999.0
z[grid.nodes_at_top_edge] = -9999.0
z[grid.nodes_at_right_edge] = -9999.0
z[1] = 0.5
assert_array_equal(grid.active_link_dirs_at_node[1], [0, -1, 0, 0])
grid.set_watershed_boundary_condition(z)
assert_array_equal(grid.active_link_dirs_at_node[1], [0, -1, 0, 0])
def test_link_update_with_nodes_closed():
rmg = RasterModelGrid((4, 5))
rmg.status_at_node[rmg.nodes_at_bottom_edge] = rmg.BC_NODE_IS_CLOSED
inactive_array = np.array([LinkStatus.INACTIVE] * 5)
assert_array_equal(rmg.status_at_link[4:9], inactive_array)
def test_link_update_with_nodes_fixed_grad():
rmg = RasterModelGrid((4, 5))
rmg.status_at_node[rmg.nodes_at_bottom_edge] = rmg.BC_NODE_IS_FIXED_GRADIENT
fixed_array = np.array([LinkStatus.FIXED] * 3)
assert_array_equal(rmg.status_at_link[5:8], fixed_array)
def test_bc_set_code_init():
grid = RasterModelGrid((4, 4))
assert grid.bc_set_code == grid.BC_NODE_IS_CORE
def test_bc_set_code_change():
rmg = RasterModelGrid((4, 5))
rmg.status_at_node[rmg.nodes_at_bottom_edge] = rmg.BC_NODE_IS_CLOSED
assert rmg.bc_set_code != rmg.BC_NODE_IS_CORE
| mit | -8,330,857,016,671,884,000 | 30.75 | 80 | 0.656693 | false |
unreal666/youtube-dl | youtube_dl/utils.py | 2 | 121771 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParseError,
compat_HTMLParser,
compat_basestring,
compat_chr,
compat_ctypes_WINFUNCTYPE,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_quote,
compat_socket_create_connection,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
USER_AGENTS = {
'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
MONTH_NAMES = {
'en': ENGLISH_MONTH_NAMES,
'fr': [
'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%B %dst %Y',
'%B %dnd %Y',
'%B %dth %Y',
'%b %d %Y',
'%b %dst %Y',
'%b %dnd %Y',
'%b %dth %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y/%m/%d',
'%Y/%m/%d %H:%M',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
'%b %d %Y at %H:%M',
'%b %d %Y at %H:%M:%S',
'%B %d %Y at %H:%M',
'%B %d %Y at %H:%M:%S',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
"""Return the content of the first tag with the specified class in the passed HTML document"""
retval = get_elements_by_class(class_name, html)
return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_elements_by_class(class_name, html):
"""Return the content of all tags with the specified class in the passed HTML document as a list"""
return get_elements_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_elements_by_attribute(attribute, value, html, escape_value=True):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
value = re.escape(value) if escape_value else value
retlist = []
for m in re.finditer(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), value), html):
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
retlist.append(unescapeHTML(res))
return retlist
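# Illustrative doctest-style examples for the attribute/class helpers above
# (a sketch based on the regex matching shown; not executed by the module):
# >>> get_element_by_class('title', '<div class="video title">Foo</div>')
# 'Foo'
# >>> get_element_by_attribute('data-id', '42', '<span data-id="42">Bar</span>')
# 'Bar'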
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
empty= noval entity="&"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
try:
parser.feed(html_element)
parser.close()
# Older Python may throw HTMLParseError in case of malformed HTML
except compat_HTMLParseError:
pass
return parser.attrs
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
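# Illustrative doctest-style examples (a sketch of the rules above, assuming the
# default ACCENT_CHARS table; not executed by the module):
# >>> sanitize_filename('Héllo: wörld?.mp4', restricted=True)
# 'Hello_-_world.mp4'
# >>> sanitize_filename('a|b*c')
# 'a_b_c'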
def sanitize_path(s):
"""Sanitizes and normalizes path on Windows"""
if sys.platform != 'win32':
return s
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
def sanitize_url(url):
# Prepend protocol-less URLs with `http:` scheme in order to mitigate
# the number of unwanted failures due to missing protocol
if url.startswith('//'):
return 'http:%s' % url
# Fix some common typos seen so far
COMMON_TYPOS = (
# https://github.com/rg3/youtube-dl/issues/15649
(r'^httpss://', r'https://'),
# https://bx1.be/lives/direct-tv/
(r'^rmtp([es]?)://', r'rtmp\1://'),
)
for mistake, fixup in COMMON_TYPOS:
if re.match(mistake, url):
return re.sub(mistake, fixup, url)
return url
def sanitized_Request(url, *args, **kwargs):
return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
def expand_path(s):
"""Expand shell variables and ~"""
return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
# '&Eacute;ric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/rg3/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
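# Illustrative doctest-style examples for the entity handling above
# (not executed by the module):
# >>> unescapeHTML('&eacute;clair &amp; caf&#233;')
# 'éclair & café'
# >>> unescapeHTML('&#x41;')
# 'A'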
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
elif secs > 60:
return '%d:%02d' % (secs // 60, secs % 60)
else:
return '%d' % secs
def make_HTTPS_handler(params, **kwargs):
opts_no_check_certificate = params.get('nocheckcertificate', False)
if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if opts_no_check_certificate:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
try:
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
except TypeError:
# Python 2.7.8
# (create_default_context present but HTTPSHandler has no context=)
pass
if sys.version_info < (3, 2):
return YoutubeDLHTTPSHandler(params, **kwargs)
else: # Python < 3.4
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message():
if ytdl_is_updateable():
update_cmd = 'type youtube-dl -U to update'
else:
update_cmd = 'see https://yt-dl.org/update on how to update'
msg = '; please report this issue on https://yt-dl.org/bug .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
return msg
class YoutubeDLError(Exception):
"""Base exception for YoutubeDL errors."""
pass
class ExtractorError(YoutubeDLError):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if cause:
msg += ' (caused by %r)' % cause
if not expected:
msg += bug_reports_message()
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
self.video_id = video_id
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class GeoRestrictedError(ExtractorError):
"""Geographic restriction Error exception.
This exception may be thrown when a video is not available from your
geographic location due to geographic restrictions imposed by a website.
"""
def __init__(self, msg, countries=None):
super(GeoRestrictedError, self).__init__(msg, expected=True)
self.msg = msg
self.countries = countries
class DownloadError(YoutubeDLError):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(YoutubeDLError):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(YoutubeDLError):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
super(PostProcessingError, self).__init__(msg)
self.msg = msg
class MaxDownloadsReached(YoutubeDLError):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(YoutubeDLError):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(YoutubeDLError):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
super(ContentTooShortError, self).__init__(
'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
)
# Both in bytes
self.downloaded = downloaded
self.expected = expected
class XAttrMetadataError(YoutubeDLError):
def __init__(self, code=None, msg='Unknown error'):
super(XAttrMetadataError, self).__init__(msg)
self.code = code
self.msg = msg
# Parsing code and msg
if (self.code in (errno.ENOSPC, errno.EDQUOT) or
'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
self.reason = 'NO_SPACE'
elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
self.reason = 'VALUE_TOO_LONG'
else:
self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/rg3/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs['strict'] = True
hc = http_class(*args, **compat_kwargs(kwargs))
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = compat_socket_create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
def http_request(self, req):
# According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
# There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/rg3/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
# As per RFC 2616, the default charset is iso-8859-1, which is respected by Python 3
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
elif url_components.scheme.lower() == 'socks4a':
socks_type = ProxyType.SOCKS4A
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
# Python 2 will choke on next HTTP request in row if there are non-ASCII
# characters in Set-Cookie HTTP header of last response (see
# https://github.com/rg3/youtube-dl/issues/6769).
# In order to at least prevent crashing we will percent encode Set-Cookie
# header before HTTPCookieProcessor starts processing it.
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
def extract_timezone(date_str):
m = re.search(
r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
try:
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
except ValueError:
pass
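# Illustrative example (a sketch, not executed by the module):
# >>> parse_iso8601('2018-02-14T09:00:00+01:00')
# 1518595200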
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
for expression in date_formats(day_first):
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
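# Illustrative examples of the day_first handling above (a sketch,
# not executed by the module):
# >>> unified_strdate('11/12/2014')
# '20141211'
# >>> unified_strdate('11/12/2014', day_first=False)
# '20141112'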
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = re.sub(r'[,|]', '', date_str)
pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
# Remove unrecognized timezones from ISO 8601 alike timestamps
m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
if m:
date_str = date_str[:-len(m.group('tz'))]
# Python only supports microseconds, so remove nanoseconds
m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
if m:
date_str = m.group(1)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
if url is None or '.' not in url:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
# Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
def subtitles_filename(filename, sub_lang, sub_format):
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
def date_from_str(date_str):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today)[+-][0-9](day|week|month|year)(s)?"""
today = datetime.date.today()
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
if match is not None:
sign = match.group('sign')
time = int(match.group('time'))
if sign == '-':
time = -time
unit = match.group('unit')
# A bad approximation?
if unit == 'month':
unit = 'day'
time *= 30
elif unit == 'year':
unit = 'day'
time *= 365
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, '%Y%m%d').date()
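# Illustrative examples (relative dates are resolved against today's date, so no
# fixed value is shown for the second case; a sketch, not executed by the module):
# >>> date_from_str('20140101')
# datetime.date(2014, 1, 1)
# >>> date_from_str('now-1week') == datetime.date.today() - datetime.timedelta(days=7)
# True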
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
('GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
('GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '') or
sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
# Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(compat_shlex_quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
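# Illustrative round trip for the two helpers above (a sketch, not executed):
# >>> url = smuggle_url('http://example.com/video', {'referrer': 'http://example.com/'})
# >>> unsmuggle_url(url)
# ('http://example.com/video', {'referrer': 'http://example.com/'})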
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
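# Illustrative examples (a sketch, not executed by the module):
# >>> format_bytes(1536)
# '1.50KiB'
# >>> format_bytes(None)
# 'N/A'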
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'bytes': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'kb': 1000,
'kilobytes': 1000,
'kibibytes': 1024,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'mb': 1000 ** 2,
'megabytes': 1000 ** 2,
'mebibytes': 1024 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'gb': 1000 ** 3,
'gigabytes': 1000 ** 3,
'gibibytes': 1024 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'tb': 1000 ** 4,
'terabytes': 1000 ** 4,
'tebibytes': 1024 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'pb': 1000 ** 5,
'petabytes': 1000 ** 5,
'pebibytes': 1024 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'eb': 1000 ** 6,
'exabytes': 1000 ** 6,
'exbibytes': 1024 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'zb': 1000 ** 7,
'zettabytes': 1000 ** 7,
'zebibytes': 1024 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
'yb': 1000 ** 8,
'yottabytes': 1000 ** 8,
'yobibytes': 1024 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
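# Illustrative example (a sketch, not executed by the module):
# >>> parse_filesize('1.5 GiB')
# 1610612736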
def parse_count(s):
if s is None:
return None
s = s.strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
}
return lookup_unit_table(_UNIT_TABLE, s)
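# Illustrative examples (a sketch, not executed by the module):
# >>> parse_count('1.3M')
# 1300000
# >>> parse_count('23,456')
# 23456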
def parse_resolution(s):
if s is None:
return {}
mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
if mobj:
return {
'width': int(mobj.group('w')),
'height': int(mobj.group('h')),
}
mobj = re.search(r'\b(\d+)[pPiI]\b', s)
if mobj:
return {'height': int(mobj.group(1))}
mobj = re.search(r'\b([48])[kK]\b', s)
if mobj:
return {'height': int(mobj.group(1)) * 540}
return {}
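# Illustrative examples (a sketch, not executed by the module):
# >>> parse_resolution('1920x1080')
# {'width': 1920, 'height': 1080}
# >>> parse_resolution('720p')
# {'height': 720}
# >>> parse_resolution('4k')
# {'height': 2160}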
def month_by_name(name, lang='en'):
""" Return the number of a month by (locale-independently) English name """
month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
try:
return month_names.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
'&',
xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
except TypeError:
# LoadLibrary in Windows Python 2.7.13 only expects
# a bytestring, but since unicode_literals turns
# every string into a unicode string, it fails.
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
if not isinstance(path, compat_str) or not path:
return None
if re.match(r'^(?:https?:)?//', path):
return path
if isinstance(base, bytes):
base = base.decode('utf-8')
if not isinstance(base, compat_str) or not re.match(
r'^(?:https?:)?//', base):
return None
return compat_urlparse.urljoin(base, path)
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
if v is None:
return default
try:
return int(v) * invscale // scale
except ValueError:
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if int_str is None:
return None
int_str = re.sub(r'[,\.\+]', '', int_str)
return int(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except ValueError:
return default
def bool_or_none(v, default=None):
return v if isinstance(v, bool) else default
def strip_or_none(v):
return None if v is None else v.strip()
def url_or_none(url):
if not url or not isinstance(url, compat_str):
return None
url = url.strip()
return url if re.match(r'^(?:[a-zA-Z][\da-zA-Z.+-]*:)?//', url) else None
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?
(?:
[0-9]+\s*y(?:ears?)?\s*
)?
(?:
[0-9]+\s*m(?:onths?)?\s*
)?
(?:
[0-9]+\s*w(?:eeks?)?\s*
)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
T)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
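# Illustrative examples (editor's addition, not part of the original module):
#   >>> parse_duration('01:02:03.05')   # HH:MM:SS.ms form
#   3723.05
#   >>> parse_duration('3 min')         # free-text form
#   180.0
# The duration is returned in seconds as a float, or None if nothing matched.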
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError:
return False
return exe
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
# STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
# SIGTTOU if youtube-dl is run in the background.
# See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
out, _ = subprocess.Popen(
[encodeArgument(exe)] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
class PagedList(object):
def __len__(self):
# This is only useful for tests
return len(self.getslice())
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize, use_cache=True):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
if use_cache:
self._cache = {}
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
page_results = None
if self._use_cache:
page_results = self._cache.get(pagenum)
if page_results is None:
page_results = list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
            # A small optimization: if the current page is not "full", i.e. it
            # does not contain page_size videos, we can assume it is the last
            # one - there are no more ids on further pages, so there is no need
            # to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
return res
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagefunc = pagefunc
self._pagecount = pagecount
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page = list(self._pagefunc(pagenum))
if skip_elems:
page = page[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page) < only_more:
only_more -= len(page)
else:
page = page[:only_more]
res.extend(page)
break
res.extend(page)
return res
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = '\xef\xbb\xbf'
if url.startswith(BOM_UTF8):
url = url[len(BOM_UTF8):]
url = url.strip()
if url.startswith(('#', ';', ']')):
return False
return url
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def _multipart_encode_impl(data, boundary):
content_type = 'multipart/form-data; boundary=%s' % boundary
out = b''
for k, v in data.items():
out += b'--' + boundary.encode('ascii') + b'\r\n'
if isinstance(k, compat_str):
k = k.encode('utf-8')
if isinstance(v, compat_str):
v = v.encode('utf-8')
# RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
# suggests sending UTF-8 directly. Firefox sends UTF-8, too
content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
if boundary.encode('ascii') in content:
raise ValueError('Boundary overlaps with data')
out += content
out += b'--' + boundary.encode('ascii') + b'--\r\n'
return out, content_type
def multipart_encode(data, boundary=None):
'''
Encode a dict to RFC 7578-compliant form-data
data:
A dict where keys and values can be either Unicode or bytes-like
objects.
boundary:
If specified a Unicode object, it's used as the boundary. Otherwise
a random boundary is generated.
Reference: https://tools.ietf.org/html/rfc7578
'''
has_specified_boundary = boundary is not None
while True:
if boundary is None:
boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
try:
out, content_type = _multipart_encode_impl(data, boundary)
break
except ValueError:
if has_specified_boundary:
raise
boundary = None
return out, content_type
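# Illustrative example (editor's addition; the field name, value and boundary
# are made up):
#   >>> body, ctype = multipart_encode({'field': 'value'}, boundary='xxx')
#   >>> ctype
#   'multipart/form-data; boundary=xxx'
# body is then
#   b'--xxx\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--xxx--\r\n'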
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
if not isinstance(getter, (list, tuple)):
getter = [getter]
for get in getter:
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
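# Illustrative example (editor's addition; the nested dict is made up).
# try_get() is handy for digging into JSON-like structures without a
# try/except at every call site:
#   >>> try_get({'a': [{'b': 42}]}, lambda x: x['a'][0]['b'], int)
#   42
#   >>> try_get({}, lambda x: x['a'][0]['b']) is None
#   True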
def merge_dicts(*dicts):
merged = {}
for a_dict in dicts:
for k, v in a_dict.items():
if v is None:
continue
if (k not in merged or
(isinstance(v, compat_str) and v and
isinstance(merged[k], compat_str) and
not merged[k])):
merged[k] = v
return merged
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
TV_PARENTAL_GUIDELINES = {
'TV-Y': 0,
'TV-Y7': 7,
'TV-G': 0,
'TV-PG': 0,
'TV-14': 14,
'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
if s in US_RATINGS:
return US_RATINGS[s]
m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
if m:
return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
return None
def strip_jsonp(code):
return re.sub(
r'''(?sx)^
(?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
(?:\s*&&\s*(?P=func_name))?
\s*\(\s*(?P<callback_data>.*)\);?
\s*?(?://[^\n]*)*$''',
r'\g<callback_data>', code)
def js_to_json(code):
COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
(r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v.startswith('/*') or v.startswith('//') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)
'''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
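# Illustrative example (editor's addition): the returned callable maps a format
# id to its index in the preference list (higher means better) and -1 for
# anything unknown:
#   >>> q = qualities(['240p', '360p', '720p'])
#   >>> q('720p'), q('240p'), q('1080p')
#   (2, 0, -1)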
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if youtube-dl can be updated with -U """
from zipimport import zipimporter
return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
# On python 2 error byte string must be decoded with proper
# encoding rather than ascii
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
ext = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
}.get(mt)
if ext is not None:
return ext
_, _, res = mt.rpartition('/')
res = res.split(';')[0].strip().lower()
return {
'3gpp': '3gp',
'smptett+xml': 'tt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-sami': 'sami',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
'mp2t': 'ts',
}.get(res, res)
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
splited_codecs = list(filter(None, map(
lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
vcodec, acodec = None, None
for full_codec in splited_codecs:
codec = full_codec.split('.')[0]
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1'):
if not vcodec:
vcodec = full_codec
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(splited_codecs) == 2:
return {
'vcodec': vcodec,
'acodec': acodec,
}
elif len(splited_codecs) == 1:
return {
'vcodec': 'none',
'acodec': vcodec,
}
else:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
}
return {}
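# Illustrative examples (editor's addition; the codec strings are typical
# RFC 6381 values, not taken from the original tests):
#   >>> parse_codecs('avc1.64001f, mp4a.40.2')
#   {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
#   >>> parse_codecs('mp4a.40.2')
#   {'vcodec': 'none', 'acodec': 'mp4a.40.2'}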
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = info_dict['url']
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
""" Render a list of rows, each as a list of values """
table = [header_row] + data
max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
return '\n'.join(format_str % tuple(row) for row in table)
def _match_one(filter_part, dct):
COMPARISON_OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
(?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
(?P<strval>(?![0-9.])[a-z0-9A-Z]*)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = COMPARISON_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
if (m.group('quotedstrval') is not None or
m.group('strval') is not None or
# If the original field is a string and matching comparisonvalue is
# a number we should respect the origin of the original field
# and process comparison value as a string (see
# https://github.com/rg3/youtube-dl/issues/11082).
actual_value is not None and m.group('intval') is not None and
isinstance(actual_value, compat_str)):
if m.group('op') not in ('=', '!='):
raise ValueError(
'Operator %s does not support string values!' % m.group('op'))
comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
quote = m.group('quote')
if quote is not None:
comparison_value = comparison_value.replace(r'\%s' % quote, quote)
else:
try:
comparison_value = int(m.group('intval'))
except ValueError:
comparison_value = parse_filesize(m.group('intval'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('intval') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid integer value %r in filter part %r' % (
m.group('intval'), filter_part))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
UNARY_OPERATORS = {
'': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
'!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
return all(
_match_one(filter_part, dct) for filter_part in filter_str.split('&'))
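# Illustrative example (editor's addition; the field names mirror typical
# info_dict keys but the values are made up):
#   >>> match_str('duration > 60 & description', {'duration': 100, 'description': 'x'})
#   True
#   >>> match_str('!is_live', {'is_live': True})
#   False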
def match_filter_func(filter_str):
def _match_func(info_dict):
if match_str(filter_str, info_dict):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
def dfxp2srt(dfxp_data):
'''
@param dfxp_data A bytes-like object containing DFXP data
@returns A unicode object containing converted SRT data
'''
LEGACY_NAMESPACES = (
(b'http://www.w3.org/ns/ttml', [
b'http://www.w3.org/2004/11/ttaf1',
b'http://www.w3.org/2006/04/ttaf1',
b'http://www.w3.org/2006/10/ttaf1',
]),
(b'http://www.w3.org/ns/ttml#styling', [
b'http://www.w3.org/ns/ttml#style',
]),
)
SUPPORTED_STYLING = [
'color',
'fontFamily',
'fontSize',
'fontStyle',
'fontWeight',
'textDecoration'
]
_x = functools.partial(xpath_with_ns, ns_map={
'xml': 'http://www.w3.org/XML/1998/namespace',
'ttml': 'http://www.w3.org/ns/ttml',
'tts': 'http://www.w3.org/ns/ttml#styling',
})
styles = {}
default_style = {}
class TTMLPElementParser(object):
_out = ''
_unclosed_elements = []
_applied_styles = []
def start(self, tag, attrib):
if tag in (_x('ttml:br'), 'br'):
self._out += '\n'
else:
unclosed_elements = []
style = {}
element_style_id = attrib.get('style')
if default_style:
style.update(default_style)
if element_style_id:
style.update(styles.get(element_style_id, {}))
for prop in SUPPORTED_STYLING:
prop_val = attrib.get(_x('tts:' + prop))
if prop_val:
style[prop] = prop_val
if style:
font = ''
for k, v in sorted(style.items()):
if self._applied_styles and self._applied_styles[-1].get(k) == v:
continue
if k == 'color':
font += ' color="%s"' % v
elif k == 'fontSize':
font += ' size="%s"' % v
elif k == 'fontFamily':
font += ' face="%s"' % v
elif k == 'fontWeight' and v == 'bold':
self._out += '<b>'
unclosed_elements.append('b')
elif k == 'fontStyle' and v == 'italic':
self._out += '<i>'
unclosed_elements.append('i')
elif k == 'textDecoration' and v == 'underline':
self._out += '<u>'
unclosed_elements.append('u')
if font:
self._out += '<font' + font + '>'
unclosed_elements.append('font')
applied_style = {}
if self._applied_styles:
applied_style.update(self._applied_styles[-1])
applied_style.update(style)
self._applied_styles.append(applied_style)
self._unclosed_elements.append(unclosed_elements)
def end(self, tag):
if tag not in (_x('ttml:br'), 'br'):
unclosed_elements = self._unclosed_elements.pop()
for element in reversed(unclosed_elements):
self._out += '</%s>' % element
if unclosed_elements and self._applied_styles:
self._applied_styles.pop()
def data(self, data):
self._out += data
def close(self):
return self._out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
for k, v in LEGACY_NAMESPACES:
for ns in v:
dfxp_data = dfxp_data.replace(ns, k)
dfxp = compat_etree_fromstring(dfxp_data)
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
repeat = False
while True:
for style in dfxp.findall(_x('.//ttml:style')):
style_id = style.get('id') or style.get(_x('xml:id'))
if not style_id:
continue
parent_style_id = style.get('style')
if parent_style_id:
if parent_style_id not in styles:
repeat = True
continue
styles[style_id] = styles[parent_style_id].copy()
for prop in SUPPORTED_STYLING:
prop_val = style.get(_x('tts:' + prop))
if prop_val:
styles.setdefault(style_id, {})[prop] = prop_val
if repeat:
repeat = False
else:
break
for p in ('body', 'div'):
ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
if ele is None:
continue
style = styles.get(ele.get('style'))
if not style:
continue
default_style.update(style)
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
def cli_option(params, command_option, param):
param = params.get(param)
if param:
param = compat_str(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
if param is None:
return []
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(params, param, default=[]):
ex_args = params.get(param)
if ex_args is None:
return default
assert isinstance(ex_args, list)
return ex_args
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
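# Illustrative example (editor's addition): the two classmethods simply invert
# each other via _lang_map:
#   >>> ISO639Utils.short2long('en')
#   'eng'
#   >>> ISO639Utils.long2short('deu')
#   'de'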
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '85.94.160.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '159.117.192.0/21',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '84.112.0.0/13',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AZ': '5.191.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '129.45.128.0/17',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '192.131.134.0/24',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '152.240.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '196.32.200.0/21',
'CG': '197.214.128.0/17',
'CH': '85.0.0.0/13',
'CI': '154.232.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '165.210.0.0/15',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '46.198.0.0/15',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '45.208.0.0/14',
'GI': '85.115.128.0/19',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '126.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.32.32.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '192.147.231.0/24',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '41.86.0.0/19',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '105.234.0.0/16',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '139.26.0.0/16',
'RO': '79.112.0.0/13',
'RS': '178.220.0.0/14',
'RU': '5.136.0.0/13',
'RW': '105.178.0.0/15',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '154.96.0.0/13',
'SE': '78.64.0.0/12',
'SG': '152.56.0.0/14',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '197.215.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '197.220.64.0/19',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '93.72.0.0/13',
'UG': '154.224.0.0/13',
'US': '3.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '82.215.64.0/18',
'VA': '212.77.0.0/19',
'VC': '24.92.144.0/20',
'VE': '186.88.0.0/13',
'VG': '172.103.64.0/18',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '165.56.0.0/13',
'ZW': '41.85.192.0/19',
}
@classmethod
def random_ipv4(cls, code_or_block):
if len(code_or_block) == 2:
block = cls._country_ip_map.get(code_or_block.upper())
if not block:
return None
else:
block = code_or_block
addr, preflen = block.split('/')
addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
addr_max = addr_min | (0xffffffff >> int(preflen))
return compat_str(socket.inet_ntoa(
compat_struct_pack('!L', random.randint(addr_min, addr_max))))
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
            # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
while n > 0:
s = compat_struct_pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def bytes_to_long(s):
"""bytes_to_long(string) : long
Convert a byte string to a long integer.
This is (essentially) the inverse of long_to_bytes().
"""
acc = 0
length = len(s)
if length % 4:
extra = (4 - length % 4)
s = b'\000' * extra + s
length = length + extra
for i in range(0, length, 4):
acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
return acc
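# Illustrative round trip (editor's addition): the two helpers are inverses of
# each other, up to leading zero bytes:
#   >>> bytes_to_long(b'\x01\x00')
#   256
#   >>> long_to_bytes(256)
#   b'\x01\x00'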
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
def pkcs1pad(data, length):
"""
Padding input data with PKCS#1 scheme
@param {int[]} data input data
@param {int} length target length
@returns {int[]} padded data
"""
if len(data) > length - 11:
raise ValueError('Input data too long for PKCS#1 padding')
pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
return [0, 2] + pseudo_random + [0] + data
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
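# Illustrative examples (editor's addition): when no table is given, the first
# n characters of FULL_TABLE are used, so base 16 behaves like lower-case hex:
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> encode_base_n(0, 2)
#   '0'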
def decode_packed_codes(code):
mobj = re.search(PACKED_CODES_RE, code)
obfucasted_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfucasted_code)
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
# Reference: https://www.w3.org/TR/PNG/
header = png_data[8:]
if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
raise IOError('Not a valid PNG file.')
int_map = {1: '>B', 2: '>H', 4: '>I'}
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
chunks = []
while header:
length = unpack_integer(header[:4])
header = header[4:]
chunk_type = header[:4]
header = header[4:]
chunk_data = header[:length]
header = header[length:]
header = header[4:] # Skip CRC
chunks.append({
'type': chunk_type,
'length': length,
'data': chunk_data
})
ihdr = chunks[0]['data']
width = unpack_integer(ihdr[:4])
height = unpack_integer(ihdr[4:8])
idat = b''
for chunk in chunks:
if chunk['type'] == b'IDAT':
idat += chunk['data']
if not idat:
raise IOError('Unable to read PNG data.')
decompressed_data = bytearray(zlib.decompress(idat))
stride = width * 3
pixels = []
def _get_pixel(idx):
x = idx % stride
y = idx // stride
return pixels[y][x]
for y in range(height):
basePos = y * (1 + stride)
filter_type = decompressed_data[basePos]
current_row = []
pixels.append(current_row)
for x in range(stride):
color = decompressed_data[1 + basePos + x]
basex = y * stride + x
left = 0
up = 0
if x > 2:
left = _get_pixel(basex - 3)
if y > 0:
up = _get_pixel(basex - stride)
if filter_type == 1: # Sub
color = (color + left) & 0xff
elif filter_type == 2: # Up
color = (color + up) & 0xff
elif filter_type == 3: # Average
color = (color + ((left + up) >> 1)) & 0xff
elif filter_type == 4: # Paeth
a = left
b = up
c = 0
if x > 2 and y > 0:
c = _get_pixel(basex - stride - 3)
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
color = (color + a) & 0xff
elif pb <= pc:
color = (color + b) & 0xff
else:
color = (color + c) & 0xff
current_row.append(color)
return width, height, pixels
def write_xattr(path, key, value):
# This mess below finds the best xattr tool for the job
try:
# try the pyxattr module...
import xattr
if hasattr(xattr, 'set'): # pyxattr
# Unicode arguments are not supported in python-pyxattr until
# version 0.5.0
# See https://github.com/rg3/youtube-dl/issues/5498
pyxattr_required_version = '0.5.0'
if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
# TODO: fallback to CLI tools
raise XAttrUnavailableError(
'python-pyxattr is detected but is too old. '
'youtube-dl requires %s or above while your version is %s. '
'Falling back to other xattr implementations' % (
pyxattr_required_version, xattr.__version__))
setxattr = xattr.set
else: # xattr
setxattr = xattr.setxattr
try:
setxattr(path, key, value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
except ImportError:
if compat_os_name == 'nt':
# Write xattrs to NTFS Alternate Data Streams:
# http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
assert ':' not in key
assert os.path.exists(path)
ads_fn = path + ':' + key
try:
with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
user_has_setfattr = check_executable('setfattr', ['--version'])
user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = value.decode('utf-8')
if user_has_setfattr:
executable = 'setfattr'
opts = ['-n', key, '-v', value]
elif user_has_xattr:
executable = 'xattr'
opts = ['-w', key, value]
cmd = ([encodeFilename(executable, True)] +
[encodeArgument(o) for o in opts] +
[encodeFilename(path, True)])
try:
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8', 'replace')
if p.returncode != 0:
raise XAttrMetadataError(p.returncode, stderr)
else:
                # On Unix, but we could not find pyxattr, setfattr, or xattr.
if sys.platform.startswith('linux'):
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'pyxattr' or 'xattr' "
"modules, or the GNU 'attr' package "
"(which contains the 'setfattr' tool).")
else:
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'xattr' module, "
"or the 'xattr' binary.")
def random_birthday(year_field, month_field, day_field):
return {
year_field: str(random.randint(1950, 1995)),
month_field: str(random.randint(1, 12)),
day_field: str(random.randint(1, 31)),
}
| unlicense | 3,374,019,621,751,892,500 | 30.05998 | 130 | 0.529817 | false |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/win32com/servers/perfmon.py | 18 | 1045 | """A COM Server which exposes the NT Performance monitor in a very rudimentary way
Usage from VB:
set ob = CreateObject("Python.PerfmonQuery")
freeBytes = ob.Query("Memory", "Available Bytes")
"""
from win32com.server import exception, register
import pythoncom, win32pdhutil, winerror
class PerfMonQuery:
_reg_verprogid_ = "Python.PerfmonQuery.1"
_reg_progid_ = "Python.PerfmonQuery"
_reg_desc_ = "Python Performance Monitor query object"
_reg_clsid_ = "{64cef7a0-8ece-11d1-a65a-00aa00125a98}"
_reg_class_spec_ = "win32com.servers.perfmon.PerfMonQuery"
_public_methods_ = [ 'Query' ]
def Query(self, object, counter, instance = None, machine = None):
try:
return win32pdhutil.GetPerformanceAttributes(object, counter, instance, machine=machine)
except win32pdhutil.error, exc:
raise exception.Exception(desc=exc.strerror)
except TypeError, desc:
raise exception.Exception(desc=desc,scode=winerror.DISP_E_TYPEMISMATCH)
if __name__=='__main__':
print "Registering COM server..."
register.UseCommandLine(PerfMonQuery)
| gpl-2.0 | 4,480,837,282,250,981,000 | 37.703704 | 91 | 0.752153 | false |
9seconds/fuel-pdsh | fuelpdsh/ssh.py | 1 | 1494 | # -*- coding: utf-8 -*-
import sys
import asyncssh
import fuelpdsh
LOG = fuelpdsh.logger(__name__)
"""Logger."""
class SSHClientSession(asyncssh.SSHClientSession):
PREFIX_LENGTH = 10
def __init__(self, hostname):
super(SSHClientSession, self).__init__()
self.obuffer = ""
self.ebuffer = ""
self.prefix = hostname.ljust(self.PREFIX_LENGTH) + ": "
def data_received(self, data, datatype):
if datatype == asyncssh.EXTENDED_DATA_STDERR:
self.ebuffer += data
self.ebuffer = self.doprint(self.ebuffer, stderr=True)
else:
self.obuffer += data
self.obuffer = self.doprint(self.obuffer, stderr=False)
return super(SSHClientSession, self).data_received(data, datatype)
def doprint(self, buf, *, flush=False, stderr=False):
if not buf:
return buf
stream = sys.stderr if stderr else sys.stdout
if flush:
print(self.data(buf), file=stream)
return ""
buf = buf.split("\n")
for chunk in buf[:-1]:
print(self.data(chunk), file=stream)
return buf[-1] if buf else ""
def data(self, text):
return self.prefix + text
def connection_lost(self, exc):
self.doprint(self.obuffer, stderr=False, flush=True)
self.doprint(self.ebuffer, stderr=True, flush=True)
if exc:
LOG.error("SSH connection %s has been dropped: %s", self, exc)
| mit | -2,341,218,159,560,355,000 | 23.9 | 74 | 0.588353 | false |
LibraryOfCongress/gazetteer | conflate/ngram.py | 1 | 3527 | import array, re, math, json
def ngrams(text, n=3):
# this bit is special for Geonames:
text = re.sub(r'\s*\(.*?\)\s*', '', text)
text = re.sub(r'[^a-z]', '', text.strip().lower())
text = " " * (n-1) + text + " " * (n-1)
grams = []
for i in range(len(text)-n+1):
grams.append(text[i:i+n])
return grams
def ngram_similarity(text1, text2, n=3):
ngrams1, ngrams2 = ngrams(text1), ngrams(text2)
match = 0
for ngram in set(ngrams1):
match += 2 * min(ngrams1.count(ngram), ngrams2.count(ngram))
return match / float(len(ngrams1) + len(ngrams2))
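# Illustrative examples (editor's addition, not part of the original module):
# strings are lower-cased, stripped of non-letters and padded with n-1 spaces
# before being split into character n-grams, so:
#   >>> ngrams('abc')
#   ['  a', ' ab', 'abc', 'bc ', 'c  ']
# and ngram_similarity(s, s) is 1.0 for any name that still has letters left
# after cleaning.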
NGRAM_BASE = ord('z')-ord('a')+2 # " " == 0
NGRAM_MIN = ord('a')-1
_ngram_cache = {}
def index_ngram(text):
if text in _ngram_cache: return _ngram_cache[text]
k = 0
for c in text:
k *= NGRAM_BASE
k += 0 if c == " " else ord(c)-NGRAM_MIN
_ngram_cache[text] = k
return k
class NGramSet(object):
def __init__(self, n=3):
self.n = 3
self.ngram_count = NGRAM_BASE ** self.n
self.counts = array.array('L', (0 for i in range(self.ngram_count)))
self.total = 0
def add(self, text):
for ngram in set(ngrams(text, self.n)):
#print ngram, index_ngram(ngram)
self.counts[index_ngram(ngram)] += 1
self.total += 1
def __getitem__(self, lookup):
if type(lookup) is str: lookup = index_ngram(lookup)
return self.counts[lookup]
def __len__(self):
return self.total
def idf(self, lookup):
return math.log(len(self)/self[lookup])
def tf_idf(self, text):
return sum(self.idf(ngram) for ngram in ngrams(text, self.n))
def mutual_tf_idf(self, text1, text2):
ngrams1 = ngrams(text1, self.n)
ngrams2 = ngrams(text2, self.n)
result = sum(self.idf(ngram) for ngram in ngrams1 if ngram in ngrams2) \
+ sum(self.idf(ngram) for ngram in ngrams2 if ngram in ngrams1)
return result / sum(self.idf(ngram) for ngram in (ngrams1 + ngrams2))
def dump(self, f):
self.counts[0] = self.total # because " " isn't a legit trigram
self.counts.tofile(f)
def load(self, f):
self.counts = array.array("L")
self.counts.fromfile(f, self.ngram_count)
self.total = self.counts[0]
class LessCleverNGramSet(NGramSet):
def __init__(self, n=3):
NGramSet.__init__(self, n)
self.counts = {}
def add(self, text):
for ngram in set(ngrams(text, self.n)):
self.counts.setdefault(ngram, 0)
self.counts[ngram] += 1
self.total += 1
def __getitem__(self, lookup):
return self.counts[lookup]
def dump(self, f):
self.counts["total"] = self.total # because "total" isn't a legit trigram
json.dump(self.counts, f)
def load(self, f):
self.counts = json.load(f)
self.total = self.counts["total"]
if __name__ == "__main__":
import sys
fname = sys.argv[1]
store = LessCleverNGramSet()
try:
store.load(file(fname))
except IOError:
pass
if len(sys.argv) < 3:
print "BEGIN:", store.total
for line in sys.stdin:
store.add(line)
store.dump(file(fname, "wb"))
print "END:", store.total
else:
for word in sys.argv[2:]:
print "%s (%s) tf_idf:%.5f" % (
word, ", ".join(("'%s':%d" % gram, store[gram]) for gram in ngrams(word)),
store.tf_idf(word))
| mit | 1,661,719,351,906,280,200 | 29.669565 | 95 | 0.555713 | false |
hmartiro/zircon | zircon/utils/export.py | 1 | 2230 | """
"""
import csv
import argparse
class DataExporter():
def __init__(self, datastore=None):
if not datastore:
from zircon.datastores.influx import InfluxDatastore
datastore = InfluxDatastore()
self.db = datastore
def export_csv(self, signals, t0, t1, dt, aggregate='first', limit=0):
for signal in signals:
result = self.db.get_timeseries(
[signal],
t0,
t1,
dt,
aggregate,
limit
)
if signal not in result:
print('Zero points found for signal {}, skipping.'.format(
signal
))
return
timeseries = result[signal]
print('Exporting {} points for signal {}.'.format(
len(timeseries),
signal
))
with open('{}.csv'.format(signal), 'w') as f:
writer = csv.writer(f, delimiter=' ')
writer.writerow(['Timestamp', 'Value'])
for point in timeseries:
writer.writerow(point)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Export signal data from the database.')
parser.add_argument('--t0', type=int, help='start time (us)',
default=0)
parser.add_argument('--t1', type=int, help='end time (us)',
default=2000000000000000)
parser.add_argument('-d', '--dt', type=int,
help='sample rate (us)', default=1)
parser.add_argument('-a', '--aggregate', type=str,
help='aggregate function', default='first')
parser.add_argument('-l', '--limit', type=int,
help='max number of points per signal', default=0)
parser.add_argument('signals', type=str, nargs='+',
help='signal IDs to export')
args = parser.parse_args()
print(args)
de = DataExporter()
de.export_csv(
args.signals,
t0=args.t0,
t1=args.t1,
dt=args.dt,
aggregate=args.aggregate,
limit=args.limit
)
| mit | 5,935,580,710,210,425,000 | 26.195122 | 74 | 0.497309 | false |
Ayub-Khan/edx-platform | lms/djangoapps/certificates/migrations/0002_data__certificatehtmlviewconfiguration_data.py | 26 | 2181 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
# Converted from the original South migration 0020_certificatehtmlviewconfiguration_data.py
from django.db import migrations, models
def forwards(apps, schema_editor):
"""
Bootstraps the HTML view template with some default configuration parameters
"""
config = {
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "Your Platform Name Here",
"company_about_url": "http://www.example.com/about-us",
"company_privacy_url": "http://www.example.com/privacy-policy",
"company_tos_url": "http://www.example.com/terms-service",
"company_verified_certificate_url": "http://www.example.com/verified-certificate",
"logo_src": "/static/certificates/images/logo.png",
"logo_url": "http://www.example.com"
},
"honor": {
"certificate_type": "Honor Code",
"certificate_title": "Certificate of Achievement",
},
"verified": {
"certificate_type": "Verified",
"certificate_title": "Verified Certificate of Achievement",
}
}
certificate_html_view_configuration_model = apps.get_model("certificates", "CertificateHtmlViewConfiguration")
db_alias = schema_editor.connection.alias
objects = certificate_html_view_configuration_model.objects.using(db_alias)
if not objects.exists():
objects.create(
configuration=json.dumps(config),
enabled=False,
)
def backwards(apps, schema_editor):
"""
Rolling back to zero-state, so remove all currently-defined configurations
"""
certificate_html_view_configuration_model = apps.get_model("certificates", "CertificateHtmlViewConfiguration")
db_alias = schema_editor.connection.alias
certificate_html_view_configuration_model.objects.using(db_alias).all().delete()
class Migration(migrations.Migration):
dependencies = [
('certificates', '0001_initial'),
]
operations = [
migrations.RunPython(forwards, backwards)
]
| agpl-3.0 | -2,469,787,986,141,617,000 | 34.177419 | 114 | 0.648785 | false |
phr34k/serpent | packages/serpent-msbuild/validate.py | 1 | 5067 | import os, sys, re
required_intdir = '$(ADK_INT_DIR)'
required_outputdir = '$(ADK_BIN_PATH)'
required_targets = ['Debug|Win32', 'Release|Win32']
def is_external(path):
if 'Thirdparty' not in path and 'extern' not in path and 'msvc' not in path:
return False
return True
def is_properties(path):
if path.endswith('.props'):
return True
return False
def verify(path):
result = open(path).read();
if is_external(path) == False and is_properties(path) == False:
# Check for Solution Dir references, this will in general break consuming msbuild project in different solutions
if '$(SolutionDir)' in result:
print ('Error: project %s depends on $(SolutionDir)' % path)
# Parse the msbuild file with python to check for more detailed syntax and incompatabillity.
import xml.etree.ElementTree as ET
result = re.sub('\\sxmlns="[^"]+"', '', result, count=1)
root = ET.fromstring(result)
# Check the OutDir references in a project, we want the references to refer to a single location so that all binaries can easilly be distributed.
for e in root.findall('PropertyGroup/OutDir'):
if required_outputdir not in e.text:
print ('Error: project %s does not output in %s' % (path, required_outputdir))
# Check the OutDir references in a project, we want the references to refer to a single location so that all binaries can easilly be distributed.
for e in root.findall('PropertyGroup/OutputPath'):
if required_outputdir not in e.text:
print ('Error: project %s does not output in %s' % (path, required_outputdir))
# Check the IntDir references in a project, we want the references to refer to a single location so that all binaries can easilly be distributed.
for e in root.findall('PropertyGroup/IntDir'):
if required_intdir not in e.text:
print ('Error: project %s intermediate dir does not contain %s' % (path, required_intdir))
# Check the IntDir references in a project, we want these references to include the Platform macro
for e in root.findall('PropertyGroup/IntDir'):
if '$(Platform)' not in e.text:
print ('Error: project %s intermediate dir does not contain $(Platform)' % path)
# Check the IntDir references in a project, we want these references to include the Configuration macro
for e in root.findall('PropertyGroup/IntDir'):
if '$(Configuration)' not in e.text:
print ('Error: project %s intermediate dir does not contain $(Configuration)' % path)
# Check the IntDir references in a project, we want the references to refer to a single location so that all binaries can easilly be distributed.
for e in root.findall('PropertyGroup/BaseIntermediateOutputPath'):
if required_intdir not in e.text:
print ('Error: project %s intermediate dir does not contain %s' % (path, required_intdir))
# Check the IntDir references in a project, we want the references to refer to a single location so that all binaries can easilly be distributed.
for e in root.findall('PropertyGroup/IntermediateOutputPath'):
if '$(Platform)' not in e.text:
print ('Error: project %s intermediate dir does not contain $(Platform)' % path)
print e.text
# Check the IntDir references in a project, we want the references to refer to a single location so that all binaries can easilly be distributed.
for e in root.findall('PropertyGroup/IntermediateOutputPath'):
if '$(Configuration)' not in e.text:
print ('Error: project %s intermediate dir does not contain $(Configuration)' % path)
print e.text
# Check for the required project configurations.
desired = {}
for e in root.findall('ItemGroup/ProjectConfiguration'):
desired[e.attrib['Include']] = True
for e in root.findall('PropertyGroup'):
if 'Condition' in e.attrib:
if re.match("\s*'\$\(Configuration\)\|\$\(Platform\)'\s*==\s*'([^']+)'\s*", e.attrib['Condition']):
desired[re.match("\s*'\$\(Configuration\)\|\$\(Platform\)'\s*==\s*'([^']+)'\s*", e.attrib['Condition']).groups(1)[0]] = True
for target in required_targets:
if target not in desired:
print ('Error: project %s does not support %s' % (path, target))
else:
del desired[target]
# Find all ClInclude
for e in root.findall('ClInclude/*'):
if re.match("[A-Z]:/*", e.attrib['Include']):
			print ('Error: project %s contains absolute path to file %s' % (path, e.attrib['Include']))
# Find all ClCompile
for e in root.findall('ClCompile/*'):
if re.match("[A-Z]:/*", e.attrib['Include']):
			print ('Error: project %s contains absolute path to file %s' % (path, e.attrib['Include']))
def scan(path):
for subdir, dirs, files in os.walk(path):
for file in files:
filepath = subdir + os.sep + file
if filepath.endswith(".vcxproj"):
verify(filepath)
if filepath.endswith(".csproj"):
verify(filepath)
if filepath.endswith(".vbproj"):
verify(filepath)
if filepath.endswith(".props"):
verify(filepath)
if __name__ == "__main__":
if len(sys.argv) >= 1:
scan(sys.argv[1]) | mit | -2,115,450,685,053,563,100 | 42.689655 | 147 | 0.6866 | false |
primecloud-controller-org/primecloud-controller | iaas-gw/src/iaasgw/controller/ec2/ec2LoadBalancercontroller.py | 5 | 25854 | # coding: UTF-8
#
# Copyright 2014 by SCSK Corporation.
#
# This file is part of PrimeCloud Controller(TM).
#
# PrimeCloud Controller(TM) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# PrimeCloud Controller(TM) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PrimeCloud Controller(TM). If not, see <http://www.gnu.org/licenses/>.
#
from iaasgw.log.log import IaasLogger
from iaasgw.module.ec2.ec2module import Listener, HealthCheck, \
LoadBalancerDescription
from iaasgw.utils.stringUtils import isNotEmpty, isBit
from sqlalchemy.sql.expression import and_
import traceback
class ec2LoadBalancercontroller(object):
logger = IaasLogger()
client = None
conn = None
platforminfo = None
STOPPED = "STOPPED"
STARTING = "STARTING"
RUNNING = "RUNNING"
STOPPING = "STOPPING"
CONFIGURING = "CONFIGURING"
WARNING = "WARNING"
STATUS={
STOPPED:STOPPED,
RUNNING:RUNNING,
STOPPING:STOPPING,
CONFIGURING:CONFIGURING,
WARNING:WARNING,
}
def __init__(self, platforminfo, ec2iaasclientLb, conn):
self.client = ec2iaasclientLb
self.conn = conn
self.platforminfo = platforminfo
def getStatusString(self, key):
if not key:
return "STOPPED"
value = self.STATUS[key]
if value != None:
return value
return "STOPPED"
def createLoadBalancer(self, farmNo, loadBalancerNo, availabilityZones, subnets, groupmap) :
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Load balancer creation info
        loadBalancerName = awsLoadBalancer["NAME"]
        # Internal load balancer flag
        internal = awsLoadBalancer["INTERNAL"]
        # Determine the default zone (the first one listed)
availabilityZone = None
for zone in availabilityZones:
availabilityZone = zone.name
        # Security groups
        securityGroups = []
        if (isNotEmpty(awsLoadBalancer["SECURITY_GROUPS"])):
            securityGroups = awsLoadBalancer["SECURITY_GROUPS"].split(",")
        # Subnet IDs
        subnetIds = []
        if (isNotEmpty(awsLoadBalancer["SUBNET_ID"])):
            subnetIds = awsLoadBalancer["SUBNET_ID"].split(",")
        # Resolve security group IDs from the subnet (VPC) relationship
        securityGroupIds = []
        if len(subnetIds) != 0:
            for subnet in subnets:
                if subnetIds[0] == subnet.subnetId:
                    # Security group IDs
for group in securityGroups:
key = group+subnet.vpcId
securityGroupIds.append(groupmap[key])
        # Dummy listener settings: instancePort, instanceProtocol, loadBalancerPort, protocol, sslCertificateId
        listener = Listener("65535", None, "65535","TCP",None)
        listeners = [listener]
        # Create the load balancer
        dnsName = self.client.createLoadBalancer(availabilityZone, listeners, loadBalancerName, subnetIds, securityGroupIds, internal)
        # Execution log
        self.logger.info(None ,"IPROCESS-200111", [awsLoadBalancer["NAME"],])
        # Write event log
        self.conn.debug(farmNo, None, None, None, None, "AwsElbCreate", ["EC2", loadBalancerName] )
        # Delete the dummy listener
        self.client.deleteLoadBalancerListeners(["65535",], loadBalancerName)
        # Enable cross-zone load balancing
        self.client.modifyLoadBalancer(loadBalancerName)
        # Execution log
        self.logger.info(None ,"IPROCESS-200226", [awsLoadBalancer["NAME"],])
        # Write event log
        self.conn.debug(farmNo, None, None, None, None, "AwsCrossZoneEnabled", ["EC2", loadBalancerName] )
        # Update the database
updateDict = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
updateDict["DNS_NAME"] = dnsName
sql = tableAWSLB.update(tableAWSLB.c.LOAD_BALANCER_NO ==updateDict["LOAD_BALANCER_NO"], values=updateDict)
self.conn.execute(sql)
return dnsName;
def deleteLoadBalancer(self, farmNo, loadBalancerNo) :
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Load balancer name
loadBalancerName = awsLoadBalancer["NAME"]
try :
self.client.deleteLoadBalancer(loadBalancerName);
            # Execution log
            self.logger.info(None ,"IPROCESS-200112", [awsLoadBalancer["NAME"],])
            # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbDelete", ["EC2", loadBalancerName] )
except Exception:
self.logger.error(traceback.format_exc())
        # Update the database
updateDict = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
updateDict["DNS_NAME"] = None
sql = tableAWSLB.update(tableAWSLB.c.LOAD_BALANCER_NO ==updateDict["LOAD_BALANCER_NO"], values=updateDict)
self.conn.execute(sql)
def configureListeners(self, farmNo, loadBalancerNo) :
table = self.conn.getTable("LOAD_BALANCER_LISTENER")
listeners = self.conn.select(table.select(table.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Start or stop each listener
for listener in listeners :
status = self.getStatusString(listener["STATUS"])
if isBit(listener["ENABLED"]):
if status == self.STOPPED :
                    # An enabled but stopped listener needs to be started
self.startListener(farmNo, loadBalancerNo, listener["LOAD_BALANCER_PORT"])
elif status == self.RUNNING:
                    # For an enabled and already running listener, just clear the flag without further processing
if isBit(listener["CONFIGURE"]):
listener["CONFIGURE"] = "0"
sql = table.update(and_(table.c.LOAD_BALANCER_NO ==listener["LOAD_BALANCER_NO"], table.c.LOAD_BALANCER_PORT == listener["LOAD_BALANCER_PORT"]), values=listener)
self.conn.execute(sql)
else :
if (status == self.RUNNING or status == self.WARNING) :
                    # A disabled listener that is running or in warning state needs to be stopped
self.stopListener(farmNo, loadBalancerNo, listener["LOAD_BALANCER_PORT"])
elif (status == self.STOPPED) :
                    # For a disabled and already stopped listener, just clear the flag without further processing
if isBit(listener["CONFIGURE"]):
listener["CONFIGURE"] = "0"
sql = table.update(and_(table.c.LOAD_BALANCER_NO ==loadBalancerNo, table.c.LOAD_BALANCER_PORT == listener["LOAD_BALANCER_PORT"]), values=listener)
self.conn.execute(sql)
def startListener(self, farmNo, loadBalancerNo, loadBalancerPort) :
table = self.conn.getTable("LOAD_BALANCER_LISTENER")
listener = self.conn.selectOne(table.select(and_(table.c.LOAD_BALANCER_NO==loadBalancerNo, table.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
try :
            # Listener creation info
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
sslKey = None
if (isNotEmpty(listener["SSL_KEY_NO"])):
                # SSL key info
tableAWSSSL = self.conn.getTable("AWS_SSL_KEY")
awsSslKey = self.conn.selectOne(tableAWSSSL.select(tableAWSSSL.c.KEY_NO==listener["SSL_KEY_NO"]))
sslKey = awsSslKey["SSLCERTIFICATEID"]
            # Load balancer name
            loadBalancerName = awsLoadBalancer["NAME"]
            # Listener settings: instancePort, instanceProtocol, loadBalancerPort, protocol, sslCertificateId
            listeners = [ Listener(listener["SERVICE_PORT"], None, listener["LOAD_BALANCER_PORT"], listener["PROTOCOL"], sslKey),]
            # Create the listener
            self.client.createLoadBalancerListeners(listeners, loadBalancerName)
            # Execution log
            self.logger.info(None ,"IPROCESS-200121", [awsLoadBalancer["NAME"], listener["LOAD_BALANCER_PORT"]])
            # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbListenerCreate", ["EC2", loadBalancerName, listener["LOAD_BALANCER_PORT"]] )
except Exception:
self.logger.error(traceback.format_exc())
            # Update status
tableLBL = self.conn.getTable("LOAD_BALANCER_LISTENER")
updateDict = self.conn.selectOne(tableLBL.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
updateDict["STATUS"] = self.WARNING
sql = tableLBL.update(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort), values=updateDict)
self.conn.execute(sql)
raise
        # Update status
tableLBL = self.conn.getTable("LOAD_BALANCER_LISTENER")
updateDict = self.conn.selectOne(table.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
updateDict["STATUS"] = self.RUNNING
sql = tableLBL.update(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort), values=updateDict)
self.conn.execute(sql)
def stopListener(self, farmNo, loadBalancerNo, loadBalancerPort) :
tableLBL = self.conn.getTable("LOAD_BALANCER_LISTENER")
listener = self.conn.selectOne(tableLBL.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
try :
            # Listener deletion info
table = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(table.select(table.c.LOAD_BALANCER_NO==loadBalancerNo))
            # Load balancer name
            loadBalancerName = awsLoadBalancer["NAME"]
            # Load balancer port
            loadBalancerPort = listener["LOAD_BALANCER_PORT"]
            loadBalancerPorts = [loadBalancerPort,]
            # Delete the listener
            self.client.deleteLoadBalancerListeners(loadBalancerPorts, loadBalancerName);
            # Execution log
            self.logger.info(None ,"IPROCESS-200122", [awsLoadBalancer["NAME"], listener["LOAD_BALANCER_PORT"]])
            # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbListenerDelete", ["EC2", loadBalancerName, listener["LOAD_BALANCER_PORT"]] )
except Exception, e:
self.logger.error(traceback.format_exc())
            self.logger.warn(str(e))
        # Update status
updateDict = self.conn.selectOne(tableLBL.select(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort)))
updateDict["STATUS"] = self.STOPPED
sql = tableLBL.update(and_(tableLBL.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBL.c.LOAD_BALANCER_PORT ==loadBalancerPort), values=updateDict)
self.conn.execute(sql)
def configureHealthCheck(self, farmNo, loadBalancerNo) :
tableLBHC = self.conn.getTable("LOAD_BALANCER_HEALTH_CHECK")
healthCheck = self.conn.selectOne(tableLBHC.select(tableLBHC.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Skip if there is no health check info
if not healthCheck :
return
        # Get the current health check settings
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Load balancer name
loadBalancerName = awsLoadBalancer["NAME"]
#loadBalancerDescriptions = self.client.describeLoadBalancer(loadBalancerName)
#description = loadBalancerDescriptions[0]
description =LoadBalancerDescription(None, None , None, None, None, None, HealthCheck(None, 1, 2, 3, 4), None, None, None, None, None )
        # Build the health check settings
target = str(healthCheck["CHECK_PROTOCOL"]) + ":" + str(healthCheck["CHECK_PORT"])
if (isNotEmpty(healthCheck["CHECK_PATH"])) :
if healthCheck["CHECK_PATH"].startswith('/') == False:
target = target + "/"
target = target + healthCheck["CHECK_PATH"]
healthCheck2 = HealthCheck(
healthCheck["HEALTHY_THRESHOLD"],
healthCheck["CHECK_INTERVAL"],
target,
healthCheck["CHECK_TIMEOUT"],
healthCheck["UNHEALTHY_THRESHOLD"])
        # Skip if the health check settings are unchanged
if ((healthCheck2.target == description.healthCheck.target)
and (healthCheck2.timeout == description.healthCheck.timeout)
and (healthCheck2.interval == description.healthCheck.interval)
and (healthCheck2.healthyThreshold == description.healthCheck.healthyThreshold)
and (healthCheck2.unhealthyThreshold == description.healthCheck.unhealthyThreshold)) :
return
        # Change the health check settings
        self.client.configureHealthCheck(healthCheck2, loadBalancerName);
        # Execution log
        self.logger.info(None ,"IPROCESS-200131", [awsLoadBalancer["NAME"],])
        # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbHealthCheckConfig", ["EC2", loadBalancerName,] )
def applySecurityGroupsToLoadBalancer(self, farmNo, loadBalancerNo, groupmap, subnets) :
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Load balancer name
        loadBalancerName = awsLoadBalancer["NAME"]
        # Return if no subnet IDs are configured
subnetIds = []
if (isNotEmpty(awsLoadBalancer["SUBNET_ID"])):
subnetIds = awsLoadBalancer["SUBNET_ID"].split(",")
else:
return
        # Security groups
securityGroups = []
if (isNotEmpty(awsLoadBalancer["SECURITY_GROUPS"])):
securityGroups = awsLoadBalancer["SECURITY_GROUPS"].split(",")
        # Convert group names to IDs
securityGroupIds = []
for subnet in subnets:
if subnetIds[0] == subnet.subnetId:
                # Security group IDs
for group in securityGroups:
key = group+subnet.vpcId
securityGroupIds.append(groupmap[key])
        # Change the security group settings
        self.client.applySecurityGroupsToLoadBalancer(securityGroupIds, loadBalancerName);
        # Execution log
        self.logger.info(None ,"IPROCESS-200225", [awsLoadBalancer["NAME"],])
        # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbSecurityGroupsConfig", ["EC2", loadBalancerName,] )
def configureInstances(self, farmNo, loadBalancerNo) :
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
loadBalancerInstances = self.conn.select(tableLBINS.select(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Skip if there are no instances to distribute to
if not loadBalancerInstances or len(loadBalancerInstances) == 0:
return
tableLB = self.conn.getTable("LOAD_BALANCER")
loadBalancer = self.conn.selectOne(tableLB.select(tableLB.c.LOAD_BALANCER_NO==loadBalancerNo))
        # Split the instances into those to register and those to deregister
enabledInstances = []
disabledInstances = []
        # Get info for the instances to be distributed to
instanceMap = {}
for loadBalancerInstance in loadBalancerInstances :
table = self.conn.getTable("INSTANCE")
instanceNo = loadBalancerInstance["INSTANCE_NO"]
            # Fetch the instance
instance = self.conn.selectOne(table.select(table.c.INSTANCE_NO == instanceNo))
instanceMap.update({instanceNo:instance})
            # Deregister if the load balancer is disabled
if not isBit(loadBalancer["ENABLED"]):
disabledInstances.append(instance)
continue;
            # Deregister if the instance is disabled
if not isBit(instance["ENABLED"]):
disabledInstances.append(instance);
continue;
if isBit(loadBalancerInstance["ENABLED"]):
enabledInstances.append(instance)
else :
disabledInstances.append(instance)
        # Register instances with the load balancer
        self.registerInstances(farmNo, loadBalancerNo, enabledInstances, loadBalancerInstances)
        # Deregister instances from the load balancer
self.unregisterInstances(farmNo, loadBalancerNo, disabledInstances, loadBalancerInstances)
def registerInstances(self, farmNo, loadBalancerNo, instances, loadBalancerInstances) :
if not instances or len(instances) == 0:
            # Skip if there are no instances to register
            return
        # Extract the instances that are not yet registered
tmpInstances = []
for loadBalancerInstance in loadBalancerInstances:
for instance in instances:
if instance["INSTANCE_NO"] == loadBalancerInstance["INSTANCE_NO"] :
status = self.getStatusString(loadBalancerInstance["STATUS"])
if status == self.STOPPED :
tmpInstances.append(instance)
instances = tmpInstances
        # Skip if there are no unregistered instances
if not instances or len(instances) == 0:
return
        # Extract the instances that are running
tmpInstanceNos = []
for instance in instances:
status = self.getStatusString(instance["STATUS"])
if status == self.RUNNING:
tmpInstanceNos.append(instance)
instances = tmpInstanceNos;
if not instances or len(instances) == 0:
            # Skip if there are no running instances
return;
        # Get the AWS instance IDs
instanceIds = []
tableAWSINS = self.conn.getTable("AWS_INSTANCE")
for instance in instances:
awsInstance = self.conn.selectOne(tableAWSINS.select(tableAWSINS.c.INSTANCE_NO == instance["INSTANCE_NO"]))
instanceIds.append(awsInstance["INSTANCE_ID"])
try :
            # Register with the load balancer
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO == loadBalancerNo))
loadBalancerName = awsLoadBalancer["NAME"]
self.client.registerInstancesWithLoadBalancer(instanceIds, loadBalancerName)
for instanceid in instanceIds:
                # Execution log
                self.logger.info(None ,"IPROCESS-200141", [awsLoadBalancer["NAME"], instanceid])
                # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbInstancesRegist", ["EC2", loadBalancerName, instanceid] )
except Exception:
self.logger.error(traceback.format_exc())
            # Update status
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.WARNING
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
raise
        # Update status
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.RUNNING
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
def unregisterInstances(self, farmNo, loadBalancerNo, instances, loadBalancerInstances) :
if not instances or len(instances) == 0:
            # Skip if there are no instances to deregister
            return
        # Extract the instances that are currently registered
tmpInstances = []
for loadBalancerInstance in loadBalancerInstances:
for instance in instances:
if instance["INSTANCE_NO"] == loadBalancerInstance["INSTANCE_NO"] :
status = self.getStatusString(loadBalancerInstance["STATUS"])
if status == self.RUNNING :
tmpInstances.append(instance)
instances = tmpInstances
if not instances or len(instances) == 0:
            # Skip if there are no registered instances
return
        # Extract the instances that are running
tmpInstanceNos = []
for instance in instances:
status = self.getStatusString(instance["STATUS"])
if status == self.RUNNING:
tmpInstanceNos.append(instance)
instances = tmpInstanceNos;
if not instances or len(instances) == 0:
            # Skip if there are no running instances
return;
        # Get the AWS instance IDs
instanceIds = []
tableAWSINS = self.conn.getTable("AWS_INSTANCE")
for instance in instances:
awsInstance = self.conn.selectOne(tableAWSINS.select(tableAWSINS.c.INSTANCE_NO == instance["INSTANCE_NO"]))
instanceIds.append(awsInstance["INSTANCE_ID"])
try :
            # Deregister from the load balancer
tableAWSLB = self.conn.getTable("AWS_LOAD_BALANCER")
awsLoadBalancer = self.conn.selectOne(tableAWSLB.select(tableAWSLB.c.LOAD_BALANCER_NO == loadBalancerNo))
loadBalancerName = awsLoadBalancer["NAME"]
self.client.deregisterInstancesFromLoadBalancer(instanceIds, loadBalancerName)
for instanceid in instanceIds:
                # Execution log
                self.logger.info(None ,"IPROCESS-200142", [awsLoadBalancer["NAME"], instanceid])
                # Write event log
self.conn.debug(farmNo, None, None, None, None, "AwsElbInstancesDeregist", ["EC2", loadBalancerName, instanceid] )
except Exception:
self.logger.error(traceback.format_exc())
            # Update status
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.WARNING
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
raise
        # Update status
tableLBINS = self.conn.getTable("LOAD_BALANCER_INSTANCE")
for instance in instances:
loadBalancerInstance = self.conn.selectOne(tableLBINS.select(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerNo, tableLBINS.c.INSTANCE_NO ==instance["INSTANCE_NO"])))
loadBalancerInstance["STATUS"] = self.STOPPED
sql = tableLBINS.update(and_(tableLBINS.c.LOAD_BALANCER_NO==loadBalancerInstance["LOAD_BALANCER_NO"], tableLBINS.c.INSTANCE_NO ==loadBalancerInstance["INSTANCE_NO"]), values=loadBalancerInstance)
self.conn.execute(sql)
| gpl-2.0 | -7,124,395,639,295,868,000 | 41.73741 | 211 | 0.638793 | false |
jawilson/Flexget | flexget/tests/test_plugin_interfaces.py | 8 | 1353 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import plugin
class TestInterfaces(object):
"""Test that any plugins declaring certain interfaces at least superficially comply with those interfaces."""
def get_plugins(self, interface):
plugins = list(plugin.get_plugins(interface=interface))
assert plugins, 'No plugins for this interface found.'
return plugins
def test_task_interface(self):
for p in self.get_plugins('task'):
assert isinstance(p.schema, dict), 'Task interface requires a schema to be defined.'
            assert p.phase_handlers, 'Task plugins should have at least one phase handler (on_task_X) method.'
def test_list_interface(self):
for p in self.get_plugins('list'):
assert isinstance(p.schema, dict), 'List interface requires a schema to be defined.'
assert hasattr(p.instance, 'get_list'), 'List plugins must implement a get_list method.'
def test_search_interface(self):
for p in self.get_plugins('search'):
assert isinstance(p.schema, dict), 'Search interface requires a schema to be defined.'
assert hasattr(p.instance, 'search'), 'Search plugins must implement a search method.'
| mit | 4,115,027,193,662,372,400 | 49.111111 | 113 | 0.692535 | false |
arokem/nipy | nipy/algorithms/tests/test_resample.py | 3 | 9815 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
from nipy.core.api import (AffineTransform, Image,
ArrayCoordMap, compose)
from nipy.core.reference import slices
from nipy.algorithms.resample import resample, resample_img2img
from nipy.io.api import load_image
from nose.tools import assert_true, assert_raises
from numpy.testing import assert_array_almost_equal
from nipy.testing import funcfile, anatfile
def test_resample_img2img():
fimg = load_image(funcfile)
aimg = load_image(anatfile)
resimg = resample_img2img(fimg, fimg)
yield assert_true, np.allclose(resimg.get_data(), fimg.get_data())
yield assert_raises, ValueError, resample_img2img, fimg, aimg
# Hackish flag for enabling pylab plots of the resampling tests
gui_review = False
def test_rotate2d():
# Rotate an image in 2d on a square grid, should result in transposed image
g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1]))
g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1]))
i = Image(np.ones((100,100)), g)
# This sets the image data by writing into the array
i.get_data()[50:55,40:55] = 3.
a = np.array([[0,1,0],
[1,0,0],
[0,0,1]], np.float)
ir = resample(i, g2, a, (100, 100))
assert_array_almost_equal(ir.get_data().T, i.get_data())
def test_rotate2d2():
# Rotate an image in 2d on a non-square grid, should result in transposed
# image
g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1]))
g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1]))
i = Image(np.ones((100,80)), g)
# This sets the image data by writing into the array
i.get_data()[50:55,40:55] = 3.
a = np.array([[0,1,0],
[1,0,0],
[0,0,1]], np.float)
ir = resample(i, g2, a, (80,100))
assert_array_almost_equal(ir.get_data().T, i.get_data())
def test_rotate2d3():
# Another way to rotate/transpose the image, similar to
# test_rotate2d2 and test_rotate2d, except the world of the
# output coordmap is the same as the world of the
# original image. That is, the data is transposed on disk, but the
# output coordinates are still 'x,'y' order, not 'y', 'x' order as
# above
# this functionality may or may not be used a lot. if data is to
# be transposed but one wanted to keep the NIFTI order of output
# coords this would do the trick
g = AffineTransform.from_params('xy', 'ij', np.diag([0.5,0.7,1]))
i = Image(np.ones((100,80)), g)
# This sets the image data by writing into the array
i.get_data()[50:55,40:55] = 3.
a = np.identity(3)
g2 = AffineTransform.from_params('xy', 'ij', np.array([[0,0.5,0],
[0.7,0,0],
[0,0,1]]))
ir = resample(i, g2, a, (80,100))
assert_array_almost_equal(ir.get_data().T, i.get_data())
def test_rotate3d():
# Rotate / transpose a 3d image on a non-square grid
g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.6,0.7,1]))
g2 = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.7,0.6,1]))
shape = (100,90,80)
i = Image(np.ones(shape), g)
i.get_data()[50:55,40:55,30:33] = 3.
a = np.array([[1,0,0,0],
[0,0,1,0],
[0,1,0,0],
[0,0,0,1.]])
ir = resample(i, g2, a, (100,80,90))
assert_array_almost_equal(np.transpose(ir.get_data(), (0,2,1)),
i.get_data())
def test_resample2d():
g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
i = Image(np.ones((100,90)), g)
i.get_data()[50:55,40:55] = 3.
# This mapping describes a mapping from the "target" physical
# coordinates to the "image" physical coordinates. The 3x3 matrix
# below indicates that the "target" physical coordinates are related
# to the "image" physical coordinates by a shift of -4 in each
# coordinate. Or, to find the "image" physical coordinates, given
# the "target" physical coordinates, we add 4 to each "target
# coordinate". The resulting resampled image should show the
# overall image shifted -8,-8 voxels towards the origin
a = np.identity(3)
a[:2,-1] = 4.
ir = resample(i, i.coordmap, a, (100,90))
assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
def test_resample2d1():
# Tests the same as test_resample2d, only using a callable instead of
# an AffineTransform instance
g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
i = Image(np.ones((100,90)), g)
i.get_data()[50:55,40:55] = 3.
a = np.identity(3)
a[:2,-1] = 4.
A = np.identity(2)
b = np.ones(2)*4
def mapper(x):
return np.dot(x, A.T) + b
ir = resample(i, i.coordmap, mapper, (100,90))
assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
def test_resample2d2():
g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
i = Image(np.ones((100,90)), g)
i.get_data()[50:55,40:55] = 3.
a = np.identity(3)
a[:2,-1] = 4.
A = np.identity(2)
b = np.ones(2)*4
ir = resample(i, i.coordmap, (A, b), (100,90))
assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
def test_resample2d3():
# Same as test_resample2d, only a different way of specifying
# the transform: here it is an (A,b) pair
g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
i = Image(np.ones((100,90)), g)
i.get_data()[50:55,40:55] = 3.
a = np.identity(3)
a[:2,-1] = 4.
ir = resample(i, i.coordmap, a, (100,90))
assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
def test_resample3d():
g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1]))
shape = (100,90,80)
i = Image(np.ones(shape), g)
i.get_data()[50:55,40:55,30:33] = 3.
# This mapping describes a mapping from the "target" physical
# coordinates to the "image" physical coordinates. The 4x4 matrix
# below indicates that the "target" physical coordinates are related
# to the "image" physical coordinates by a shift of -4 in each
# coordinate. Or, to find the "image" physical coordinates, given
# the "target" physical coordinates, we add 4 to each "target
# coordinate". The resulting resampled image should show the
# overall image shifted [-6,-8,-10] voxels towards the origin
a = np.identity(4)
a[:3,-1] = [3,4,5]
ir = resample(i, i.coordmap, a, (100,90,80))
assert_array_almost_equal(ir.get_data()[44:49,32:47,20:23], 3.)
def test_nonaffine():
# resamples an image along a curve through the image.
#
# FIXME: use the reference.evaluate.Grid to perform this nicer
# FIXME: Remove pylab references
def curve(x): # function accept N by 1, returns N by 2
return (np.vstack([5*np.sin(x.T),5*np.cos(x.T)]).T + [52,47])
for names in (('xy', 'ij', 't', 'u'),('ij', 'xy', 't', 's')):
in_names, out_names, tin_names, tout_names = names
g = AffineTransform.from_params(in_names, out_names, np.identity(3))
img = Image(np.ones((100,90)), g)
img.get_data()[50:55,40:55] = 3.
tcoordmap = AffineTransform.from_start_step(
tin_names,
tout_names,
[0],
[np.pi*1.8/100])
ir = resample(img, tcoordmap, curve, (100,))
if gui_review:
import pylab
pylab.figure(num=3)
pylab.imshow(img, interpolation='nearest')
d = curve(np.linspace(0,1.8*np.pi,100))
pylab.plot(d[0], d[1])
pylab.gca().set_ylim([0,99])
pylab.gca().set_xlim([0,89])
pylab.figure(num=4)
pylab.plot(ir.get_data())
def test_2d_from_3d():
# Resample a 3d image on a 2d affine grid
# This example creates a coordmap that coincides with
# the 10th slice of an image, and checks that
# resampling agrees with the data in the 10th slice.
shape = (100,90,80)
g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1]))
i = Image(np.ones(shape), g)
i.get_data()[50:55,40:55,30:33] = 3.
a = np.identity(4)
g2 = ArrayCoordMap.from_shape(g, shape)[10]
ir = resample(i, g2.coordmap, a, g2.shape)
assert_array_almost_equal(ir.get_data(), i[10].get_data())
def test_slice_from_3d():
# Resample a 3d image, returning a zslice, yslice and xslice
#
# This example creates a coordmap that coincides with
# a given z, y, or x slice of an image, and checks that
# resampling agrees with the data in the given slice.
shape = (100,90,80)
g = AffineTransform.from_params('ijk',
'xyz',
np.diag([0.5,0.5,0.5,1]))
img = Image(np.ones(shape), g)
img.get_data()[50:55,40:55,30:33] = 3
I = np.identity(4)
zsl = slices.zslice(26,
((0,49.5), 100),
((0,44.5), 90),
img.reference)
ir = resample(img, zsl, I, (100, 90))
assert_array_almost_equal(ir.get_data(), img[:,:,53].get_data())
ysl = slices.yslice(22,
((0,49.5), 100),
((0,39.5), 80),
img.reference)
ir = resample(img, ysl, I, (100, 80))
assert_array_almost_equal(ir.get_data(), img[:,45,:].get_data())
xsl = slices.xslice(15.5,
((0,44.5), 90),
((0,39.5), 80),
img.reference)
ir = resample(img, xsl, I, (90, 80))
assert_array_almost_equal(ir.get_data(), img[32,:,:].get_data())
| bsd-3-clause | -3,983,556,513,998,079,000 | 38.417671 | 79 | 0.584208 | false |
home-assistant/home-assistant | homeassistant/components/philips_js/device_trigger.py | 5 | 2230 | """Provides device automations for control of device."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.device_registry import DeviceRegistry, async_get_registry
from homeassistant.helpers.typing import ConfigType
from . import PhilipsTVDataUpdateCoordinator
from .const import DOMAIN
TRIGGER_TYPE_TURN_ON = "turn_on"
TRIGGER_TYPES = {TRIGGER_TYPE_TURN_ON}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
"""List device triggers for device."""
triggers = []
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: TRIGGER_TYPE_TURN_ON,
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE | None:
"""Attach a trigger."""
trigger_id = automation_info.get("trigger_id") if automation_info else None
registry: DeviceRegistry = await async_get_registry(hass)
if config[CONF_TYPE] == TRIGGER_TYPE_TURN_ON:
variables = {
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": config[CONF_DEVICE_ID],
"description": f"philips_js '{config[CONF_TYPE]}' event",
"id": trigger_id,
}
}
device = registry.async_get(config[CONF_DEVICE_ID])
for config_entry_id in device.config_entries:
coordinator: PhilipsTVDataUpdateCoordinator = hass.data[DOMAIN].get(
config_entry_id
)
if coordinator:
return coordinator.turn_on.async_attach(action, variables)
return None
| apache-2.0 | 8,366,974,317,532,377,000 | 31.318841 | 85 | 0.654709 | false |
mpetyx/DarwinsMusic | src/server/server/franz/openrdf/util/uris.py | 2 | 2282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
###############################################################################
# Copyright (c) 2006-2013 Franz Inc.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
###############################################################################
from __future__ import absolute_import
from ..exceptions import IllegalArgumentException
## Finds the index of the first local name character in an (non-relative)
## URI. This index is determined by the following the following steps:
## <ul>
## <li>Find the <em>first</em> occurrence of the '#' character,
## <li>If this fails, find the <em>last</em> occurrence of the '/'
## character,
## <li>If this fails, find the <em>last</em> occurrence of the ':'
## character.
## <li>Add <tt>1</tt> to the found index and return this value.
## </ul>
## Note that the third step should never fail as every legal (non-relative)
## URI contains at least one ':' character to separate the scheme from the
## rest of the URI. If this fails anyway, the method will throw an
## {@link IllegalArgumentException}.
##
## @param uri
## A URI string.
## @return The index of the first local name character in the URI string. Note that
## this index does not reference an actual character if the algorithm determines
##         that there is no local name. In that case, the return index is equal to the
## length of the URI string.
## @throws IllegalArgumentException
## If the supplied URI string doesn't contain any of the separator
## characters. Every legal (non-relative) URI contains at least one
## ':' character to separate the scheme from the rest of the URI.
def getLocalNameIndex(uri):
idx = uri.rfind('#')
if (idx < 0):
idx = uri.rfind('/')
if (idx < 0):
idx = uri.rfind(':')
if (idx < 0):
raise IllegalArgumentException("No separator character found in URI: " + uri)
return idx + 1
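## Illustrative examples (added for clarity; the exact indices follow the rules above):
## getLocalNameIndex('http://example.org/vocab#Person') points at 'Person' (just after the '#'),
## getLocalNameIndex('http://example.org/vocab/Person') points just after the last '/',
## and getLocalNameIndex('urn:isbn:0451450523') points just after the last ':'.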
def asURIString(value):
value = str(value)
if value.startswith('<'): return value
else: return "<%s>" % value
| agpl-3.0 | 3,228,351,504,672,660,500 | 40.490909 | 85 | 0.641104 | false |
koiszzz/shadowsocks | shadowsocks/encrypt.py | 26 | 6429 | #!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import hashlib
import string
import struct
import logging
import crypto.m2
import crypto.rc4_md5
import crypto.salsa20_ctr
import crypto.ctypes_openssl
method_supported = {}
method_supported.update(crypto.rc4_md5.ciphers)
method_supported.update(crypto.salsa20_ctr.ciphers)
method_supported.update(crypto.ctypes_openssl.ciphers)
# let M2Crypto override ctypes_openssl
method_supported.update(crypto.m2.ciphers)
def random_string(length):
try:
import M2Crypto.Rand
return M2Crypto.Rand.rand_bytes(length)
except ImportError:
# TODO really strong enough on Linux?
return os.urandom(length)
cached_tables = {}
cached_keys = {}
def get_table(key):
m = hashlib.md5()
m.update(key)
s = m.digest()
(a, b) = struct.unpack('<QQ', s)
table = [c for c in string.maketrans('', '')]
for i in xrange(1, 1024):
table.sort(lambda x, y: int(a % (ord(x) + i) - a % (ord(y) + i)))
return table
def init_table(key, method=None):
if method is not None and method == 'table':
method = None
if not method:
if key in cached_tables:
return cached_tables[key]
encrypt_table = ''.join(get_table(key))
decrypt_table = string.maketrans(encrypt_table,
string.maketrans('', ''))
cached_tables[key] = [encrypt_table, decrypt_table]
else:
try:
Encryptor(key, method) # test if the settings if OK
except Exception as e:
logging.error(e)
sys.exit(1)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
password = str(password)
r = cached_keys.get(password, None)
if r:
return r
m = []
i = 0
while len(''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = ''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[password] = (key, iv)
return (key, iv)
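# Illustrative example (added for clarity, not part of the original module): for a
# cipher such as 'aes-256-cfb' (key_len=32, iv_len=16), EVP_BytesToKey('secret', 32, 16)
# chains MD5 digests of (previous digest + password) until 48 bytes are available and
# splits them into a 32-byte key and a 16-byte IV; the result is cached per password.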
class Encryptor(object):
def __init__(self, key, method=None):
if method == 'table':
method = None
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = ''
self.decipher = None
if method:
self.cipher = self.get_cipher(key, method, 1, iv=random_string(32))
else:
self.encrypt_table, self.decrypt_table = init_table(key)
self.cipher = None
def get_cipher_param(self, method):
method = method.lower()
m = method_supported.get(method, None)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv=None):
password = password.encode('utf-8')
method = method.lower()
m = self.get_cipher_param(method)
if m:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
if iv is None:
iv = iv_
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
logging.error('method %s not supported' % method)
sys.exit(1)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if not self.method:
return string.translate(buf, self.encrypt_table)
else:
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if not self.method:
return string.translate(buf, self.decrypt_table)
else:
if self.decipher is None:
decipher_iv_len = self.get_cipher_param(self.method)[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
if method is not None and method.lower() == 'table':
method = None
if not method:
[encrypt_table, decrypt_table] = init_table(password)
if op:
return string.translate(data, encrypt_table)
else:
return string.translate(data, decrypt_table)
else:
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
(key, _) = EVP_BytesToKey(password, key_len, iv_len)
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return ''.join(result)
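# Illustrative round trip (added for clarity; assumes one of the supported cipher
# backends is available in this environment):
#   data = encrypt_all('password', 'aes-256-cfb', 1, 'hello')   # returns iv + ciphertext
#   assert encrypt_all('password', 'aes-256-cfb', 0, data) == 'hello'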
| mit | -536,966,463,627,423,740 | 30.826733 | 79 | 0.588427 | false |
cjahangir/geodash-new | geonode/people/urls.py | 2 | 1870 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from geonode.people.views import CreateUser, activateuser, UserSignup
urlpatterns = patterns('geonode.people.views',
url(r'^$', TemplateView.as_view(template_name='people/profile_list.html'),
name='profile_browse'),
url(r"^edit/$", "profile_edit", name="profile_edit"),
url(r"^edit/(?P<username>[^/]*)$", "profile_edit", name="profile_edit"),
url(r"^profile/(?P<username>[^/]*)/$", "profile_detail", name="profile_detail"),
url(r'^forgotname', 'forgot_username', name='forgot_username'),
url(r'^create/$', CreateUser.as_view(), name='create-user'),
url(r'^active-inactive-user/(?P<username>[^/]*)$', activateuser, name='active-inactive-user'),
url(r"^signup/$", UserSignup.as_view(), name="user_signup"),
)
| gpl-3.0 | -5,356,872,534,226,905,000 | 50.944444 | 117 | 0.575936 | false |
storm-computers/odoo | openerp/tools/convert.py | 12 | 38867 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import cStringIO
import csv
import logging
import os.path
import re
import sys
import time
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pytz
from lxml import etree, builder
import openerp
import openerp.release
import openerp.workflow
import assertion_report
import misc
from config import config
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from misc import SKIPPED_ELEMENT_TYPES
from misc import pickle, unquote
from openerp import SUPERUSER_ID
from translate import _
from yaml_import import convert_yaml_import
_logger = logging.getLogger(__name__)
from safe_eval import safe_eval as s_eval
safe_eval = lambda expr, ctx={}: s_eval(expr, ctx, nocopy=True)
class ParseError(Exception):
def __init__(self, msg, text, filename, lineno):
self.msg = msg
self.text = text
self.filename = filename
self.lineno = lineno
def __str__(self):
return '"%s" while parsing %s:%s, near\n%s' \
% (self.msg, self.filename, self.lineno, self.text)
def _ref(self, cr):
return lambda x: self.id_get(cr, x)
def _obj(pool, cr, uid, model_str, context=None):
model = pool[model_str]
return lambda x: model.browse(cr, uid, x, context=context)
def _get_idref(self, cr, uid, model_str, context, idref):
idref2 = dict(idref,
time=time,
DateTime=datetime,
datetime=datetime,
timedelta=timedelta,
relativedelta=relativedelta,
version=openerp.release.major_version,
ref=_ref(self, cr),
pytz=pytz)
if len(model_str):
idref2['obj'] = _obj(self.pool, cr, uid, model_str, context=context)
return idref2
def _fix_multiple_roots(node):
"""
Surround the children of the ``node`` element of an XML field with a
single root "data" element, to prevent having a document with multiple
roots once parsed separately.
XML nodes should have one root only, but we'd like to support
direct multiple roots in our partial documents (like inherited view architectures).
As a convention we'll surround multiple root with a container "data" element, to be
ignored later when parsing.
"""
real_nodes = [x for x in node if not isinstance(x, SKIPPED_ELEMENT_TYPES)]
if len(real_nodes) > 1:
data_node = etree.Element("data")
for child in node:
data_node.append(child)
node.append(data_node)
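# Illustrative example (added for clarity, not part of the original code): an XML field
# such as <field name="arch" type="xml"><tree/><form/></field> has two root children, so
# _fix_multiple_roots() gathers them under a single <data> element, effectively producing
# <field name="arch" type="xml"><data><tree/><form/></data></field>.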
def _eval_xml(self, node, pool, cr, uid, idref, context=None):
if context is None:
context = {}
if node.tag in ('field','value'):
t = node.get('type','char')
f_model = node.get('model', '').encode('utf-8')
if node.get('search'):
f_search = node.get("search",'').encode('utf-8')
f_use = node.get("use",'id').encode('utf-8')
f_name = node.get("name",'').encode('utf-8')
idref2 = {}
if f_search:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
q = safe_eval(f_search, idref2)
ids = pool[f_model].search(cr, uid, q)
if f_use != 'id':
ids = map(lambda x: x[f_use], pool[f_model].read(cr, uid, ids, [f_use]))
_cols = pool[f_model]._columns
if (f_name in _cols) and _cols[f_name]._type=='many2many':
return ids
f_val = False
if len(ids):
f_val = ids[0]
if isinstance(f_val, tuple):
f_val = f_val[0]
return f_val
a_eval = node.get('eval','')
if a_eval:
idref2 = _get_idref(self, cr, uid, f_model, context, idref)
try:
return safe_eval(a_eval, idref2)
except Exception:
logging.getLogger('openerp.tools.convert.init').error(
'Could not eval(%s) for %s in %s', a_eval, node.get('name'), context)
raise
def _process(s, idref):
matches = re.finditer('[^%]%\((.*?)\)[ds]', s)
done = []
for m in matches:
found = m.group()[1:]
if found in done:
continue
done.append(found)
id = m.groups()[0]
if not id in idref:
idref[id] = self.id_get(cr, id)
s = s.replace(found, str(idref[id]))
        s = s.replace('%%', '%') # Quite weird, but it's kept for (somewhat) backward compatibility's sake
return s
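    # Illustrative example (added for clarity, not part of the original code): if idref
    # resolves 'base.group_user' to 42, then _process('groups: %(base.group_user)d', idref)
    # returns 'groups: 42'; a literal '%%' in the source is collapsed to a single '%'.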
if t == 'xml':
_fix_multiple_roots(node)
return '<?xml version="1.0"?>\n'\
+_process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
if t == 'html':
return _process("".join([etree.tostring(n, encoding='utf-8')
for n in node]), idref)
data = node.text
if node.get('file'):
with openerp.tools.file_open(node.get('file'), 'rb') as f:
data = f.read()
if t == 'file':
from ..modules import module
path = data.strip()
if not module.get_module_resource(self.module, path):
raise IOError("No such file or directory: '%s' in %s" % (
path, self.module))
return '%s,%s' % (self.module, path)
if t == 'char':
return data
if t == 'base64':
return data.encode('base64')
if t == 'int':
d = data.strip()
if d == 'None':
return None
return int(d)
if t == 'float':
return float(data.strip())
if t in ('list','tuple'):
res=[]
for n in node.iterchildren(tag='value'):
res.append(_eval_xml(self,n,pool,cr,uid,idref))
if t=='tuple':
return tuple(res)
return res
elif node.tag == "function":
args = []
a_eval = node.get('eval','')
# FIXME: should probably be exclusive
if a_eval:
idref['ref'] = lambda x: self.id_get(cr, x)
args = safe_eval(a_eval, idref)
for n in node:
return_val = _eval_xml(self,n, pool, cr, uid, idref, context)
if return_val is not None:
args.append(return_val)
model = pool[node.get('model', '')]
method = node.get('name')
res = getattr(model, method)(cr, uid, *args)
return res
elif node.tag == "test":
return node.text
class xml_import(object):
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.get(attr):
return default
val = node.get(attr).strip()
if not val:
return default
return val.lower() not in ('0', 'false', 'off')
def isnoupdate(self, data_node=None):
return self.noupdate or (len(data_node) and self.nodeattr2bool(data_node, 'noupdate', False))
def get_context(self, data_node, node, eval_dict):
data_node_context = (len(data_node) and data_node.get('context','').encode('utf8'))
node_context = node.get("context",'').encode('utf8')
context = {}
for ctx in (data_node_context, node_context):
if ctx:
try:
ctx_res = safe_eval(ctx, eval_dict)
if isinstance(context, dict):
context.update(ctx_res)
else:
context = ctx_res
except (ValueError, NameError):
# Some contexts contain references that are only valid at runtime at
# client-side, so in that case we keep the original context string
# as it is. We also log it, just in case.
context = ctx
_logger.debug('Context value (%s) for element with id "%s" or its data node does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
ctx, node.get('id','n/a'), exc_info=True)
return context
def get_uid(self, cr, uid, data_node, node):
node_uid = node.get('uid','') or (len(data_node) and data_node.get('uid',''))
if node_uid:
return self.id_get(cr, node_uid)
return uid
def _test_xml_id(self, xml_id):
id = xml_id
if '.' in xml_id:
module, id = xml_id.split('.', 1)
assert '.' not in id, """The ID reference "%s" must contain
at most one dot. Dots are used to refer to IDs of other modules, in the
form: module.record_id""" % (xml_id,)
if module != self.module:
modcnt = self.pool['ir.module.module'].search_count(self.cr, self.uid, ['&', ('name', '=', module), ('state', 'in', ['installed'])])
assert modcnt == 1, """The ID "%s" refers to an uninstalled module""" % (xml_id,)
if len(id) > 64:
_logger.error('id: %s is to long (max: 64)', id)
def _tag_delete(self, cr, rec, data_node=None, mode=None):
d_model = rec.get("model")
d_search = rec.get("search",'').encode('utf-8')
d_id = rec.get("id")
ids = []
if d_search:
idref = _get_idref(self, cr, self.uid, d_model, context={}, idref={})
try:
ids = self.pool[d_model].search(cr, self.uid, safe_eval(d_search, idref))
except ValueError:
_logger.warning('Skipping deletion for failed search `%r`', d_search, exc_info=True)
pass
if d_id:
try:
ids.append(self.id_get(cr, d_id))
except ValueError:
# d_id cannot be found. doesn't matter in this case
_logger.warning('Skipping deletion for missing XML ID `%r`', d_id, exc_info=True)
pass
if ids:
self.pool[d_model].unlink(cr, self.uid, ids)
def _remove_ir_values(self, cr, name, value, model):
ir_values_obj = self.pool['ir.values']
ir_value_ids = ir_values_obj.search(cr, self.uid, [('name','=',name),('value','=',value),('model','=',model)])
if ir_value_ids:
ir_values_obj.unlink(cr, self.uid, ir_value_ids)
return True
def _tag_report(self, cr, rec, data_node=None, mode=None):
res = {}
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.get(f,'').encode('utf8')
assert res[dest], "Attribute %s of report is empty !" % (f,)
for field,dest in (('rml','report_rml'),('file','report_rml'),('xml','report_xml'),('xsl','report_xsl'),
('attachment','attachment'),('attachment_use','attachment_use'), ('usage','usage'),
('report_type', 'report_type'), ('parser', 'parser')):
if rec.get(field):
res[dest] = rec.get(field).encode('utf8')
if rec.get('auto'):
res['auto'] = safe_eval(rec.get('auto','False'))
if rec.get('sxw'):
sxw_content = misc.file_open(rec.get('sxw')).read()
res['report_sxw_content'] = sxw_content
if rec.get('header'):
res['header'] = safe_eval(rec.get('header','False'))
res['multi'] = rec.get('multi') and safe_eval(rec.get('multi','False'))
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
if rec.get('paperformat'):
pf_name = rec.get('paperformat')
pf_id = self.id_get(cr,pf_name)
res['paperformat_id'] = pf_id
id = self.pool['ir.model.data']._update(cr, self.uid, "ir.actions.report.xml", self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if not rec.get('menu') or safe_eval(rec.get('menu','False')):
keyword = str(rec.get('keyword', 'client_print_multi'))
value = 'ir.actions.report.xml,'+str(id)
ir_values_id = self.pool['ir.values'].set_action(cr, self.uid, res['name'], keyword, res['model'], value)
self.pool['ir.actions.report.xml'].write(cr, self.uid, id, {'ir_values_id': ir_values_id})
elif self.mode=='update' and safe_eval(rec.get('menu','False'))==False:
# Special check for report having attribute menu=False on update
value = 'ir.actions.report.xml,'+str(id)
self._remove_ir_values(cr, res['name'], value, res['model'])
self.pool['ir.actions.report.xml'].write(cr, self.uid, id, {'ir_values_id': False})
return id
def _tag_function(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
context = self.get_context(data_node, rec, {'ref': _ref(self, cr)})
uid = self.get_uid(cr, self.uid, data_node, rec)
_eval_xml(self,rec, self.pool, cr, uid, self.idref, context=context)
return
def _tag_act_window(self, cr, rec, data_node=None, mode=None):
name = rec.get('name','').encode('utf-8')
xml_id = rec.get('id','').encode('utf8')
self._test_xml_id(xml_id)
type = rec.get('type','').encode('utf-8') or 'ir.actions.act_window'
view_id = False
if rec.get('view_id'):
view_id = self.id_get(cr, rec.get('view_id','').encode('utf-8'))
domain = rec.get('domain','').encode('utf-8') or '[]'
res_model = rec.get('res_model','').encode('utf-8')
src_model = rec.get('src_model','').encode('utf-8')
view_type = rec.get('view_type','').encode('utf-8') or 'form'
view_mode = rec.get('view_mode','').encode('utf-8') or 'tree,form'
usage = rec.get('usage','').encode('utf-8')
limit = rec.get('limit','').encode('utf-8')
auto_refresh = rec.get('auto_refresh','').encode('utf-8')
uid = self.uid
# Act_window's 'domain' and 'context' contain mostly literals
# but they can also refer to the variables provided below
# in eval_context, so we need to eval() them before storing.
# Among the context variables, 'active_id' refers to
# the currently selected items in a list view, and only
# takes meaning at runtime on the client side. For this
# reason it must remain a bare variable in domain and context,
# even after eval() at server-side. We use the special 'unquote'
# class to achieve this effect: a string which has itself, unquoted,
# as representation.
active_id = unquote("active_id")
active_ids = unquote("active_ids")
active_model = unquote("active_model")
def ref(str_id):
return self.id_get(cr, str_id)
# Include all locals() in eval_context, for backwards compatibility
eval_context = {
'name': name,
'xml_id': xml_id,
'type': type,
'view_id': view_id,
'domain': domain,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
'uid' : uid,
'active_id': active_id,
'active_ids': active_ids,
'active_model': active_model,
'ref' : ref,
}
context = self.get_context(data_node, rec, eval_context)
try:
domain = safe_eval(domain, eval_context)
except (ValueError, NameError):
# Some domains contain references that are only valid at runtime at
# client-side, so in that case we keep the original domain string
# as it is. We also log it, just in case.
_logger.debug('Domain value (%s) for element with id "%s" does not parse '\
'at server-side, keeping original string, in case it\'s meant for client side only',
domain, xml_id or 'n/a', exc_info=True)
res = {
'name': name,
'type': type,
'view_id': view_id,
'domain': domain,
'context': context,
'res_model': res_model,
'src_model': src_model,
'view_type': view_type,
'view_mode': view_mode,
'usage': usage,
'limit': limit,
'auto_refresh': auto_refresh,
}
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
res['groups_id'] = groups_value
if rec.get('target'):
res['target'] = rec.get('target','')
if rec.get('multi'):
res['multi'] = safe_eval(rec.get('multi', 'False'))
id = self.pool['ir.model.data']._update(cr, self.uid, 'ir.actions.act_window', self.module, res, xml_id, noupdate=self.isnoupdate(data_node), mode=self.mode)
self.idref[xml_id] = int(id)
if src_model:
#keyword = 'client_action_relate'
keyword = rec.get('key2','').encode('utf-8') or 'client_action_relate'
value = 'ir.actions.act_window,'+str(id)
replace = rec.get('replace','') or True
self.pool['ir.model.data'].ir_set(cr, self.uid, 'action', keyword, xml_id, [src_model], value, replace=replace, isobject=True, xml_id=xml_id)
# TODO add remove ir.model.data
def _tag_ir_set(self, cr, rec, data_node=None, mode=None):
"""
.. deprecated:: 9.0
Use the <record> notation with ``ir.values`` as model instead.
"""
if self.mode != 'init':
return
res = {}
for field in rec.findall('./field'):
f_name = field.get("name",'').encode('utf-8')
f_val = _eval_xml(self,field,self.pool, cr, self.uid, self.idref)
res[f_name] = f_val
self.pool['ir.model.data'].ir_set(cr, self.uid, res['key'], res['key2'], res['name'], res['models'], res['value'], replace=res.get('replace',True), isobject=res.get('isobject', False), meta=res.get('meta',None))
def _tag_workflow(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
model = rec.get('model').encode('ascii')
w_ref = rec.get('ref')
if w_ref:
id = self.id_get(cr, w_ref)
else:
number_children = len(rec)
assert number_children > 0,\
'You must define a child node if you dont give a ref'
assert number_children == 1,\
'Only one child node is accepted (%d given)' % number_children
id = _eval_xml(self, rec[0], self.pool, cr, self.uid, self.idref)
uid = self.get_uid(cr, self.uid, data_node, rec)
openerp.workflow.trg_validate(
uid, model, id, rec.get('action').encode('ascii'), cr)
def _tag_menuitem(self, cr, rec, data_node=None, mode=None):
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
# The parent attribute was specified, if non-empty determine its ID, otherwise
# explicitly make a top-level menu
if rec.get('parent'):
menu_parent_id = self.id_get(cr, rec.get('parent',''))
else:
# we get here with <menuitem parent="">, explicit clear of parent, or
# if no parent attribute at all but menu name is not a menu path
menu_parent_id = False
values = {'parent_id': menu_parent_id}
if rec.get('name'):
values['name'] = rec.get('name')
try:
res = [ self.id_get(cr, rec.get('id','')) ]
except:
res = None
if rec.get('action'):
a_action = rec.get('action','').encode('utf8')
# determine the type of action
action_type, action_id = self.model_id_get(cr, a_action)
action_type = action_type.split('.')[-1] # keep only type part
values['action'] = "ir.actions.%s,%d" % (action_type, action_id)
if not values.get('name') and action_type in ('act_window', 'wizard', 'url', 'client', 'server'):
a_table = 'ir_act_%s' % action_type.replace('act_', '')
cr.execute('select name from "%s" where id=%%s' % a_table, (int(action_id),))
resw = cr.fetchone()
if resw:
values['name'] = resw[0]
if not values.get('name'):
# ensure menu has a name
values['name'] = rec_id or '?'
if rec.get('sequence'):
values['sequence'] = int(rec.get('sequence'))
if rec.get('groups'):
g_names = rec.get('groups','').split(',')
groups_value = []
for group in g_names:
if group.startswith('-'):
group_id = self.id_get(cr, group[1:])
groups_value.append((3, group_id))
else:
group_id = self.id_get(cr, group)
groups_value.append((4, group_id))
values['groups_id'] = groups_value
if not values.get('parent_id'):
if rec.get('web_icon'):
values['web_icon'] = rec.get('web_icon')
pid = self.pool['ir.model.data']._update(cr, self.uid, 'ir.ui.menu', self.module, values, rec_id, noupdate=self.isnoupdate(data_node), mode=self.mode, res_id=res and res[0] or False)
if rec_id and pid:
self.idref[rec_id] = int(pid)
return 'ir.ui.menu', pid
def _assert_equals(self, f1, f2, prec=4):
return not round(f1 - f2, prec)
def _tag_assert(self, cr, rec, data_node=None, mode=None):
if self.isnoupdate(data_node) and self.mode != 'init':
return
rec_model = rec.get("model",'').encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
self._test_xml_id(rec_id)
rec_src = rec.get("search",'').encode('utf8')
rec_src_count = rec.get("count")
rec_string = rec.get("string",'').encode('utf8') or 'unknown'
ids = None
eval_dict = {'ref': _ref(self, cr)}
context = self.get_context(data_node, rec, eval_dict)
uid = self.get_uid(cr, self.uid, data_node, rec)
if rec_id:
ids = [self.id_get(cr, rec_id)]
elif rec_src:
q = safe_eval(rec_src, eval_dict)
ids = self.pool[rec_model].search(cr, uid, q, context=context)
if rec_src_count:
count = int(rec_src_count)
if len(ids) != count:
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' Incorrect search count:\n' \
' expected count: %d\n' \
' obtained count: %d\n' \
% (rec_string, count, len(ids))
_logger.error(msg)
return
assert ids is not None,\
'You must give either an id or a search criteria'
ref = _ref(self, cr)
for id in ids:
brrec = model.browse(cr, uid, id, context)
class d(dict):
def __getitem__(self2, key):
if key in brrec:
return brrec[key]
return dict.__getitem__(self2, key)
globals_dict = d()
globals_dict['floatEqual'] = self._assert_equals
globals_dict['ref'] = ref
globals_dict['_ref'] = ref
for test in rec.findall('./test'):
f_expr = test.get("expr",'').encode('utf-8')
expected_value = _eval_xml(self, test, self.pool, cr, uid, self.idref, context=context) or True
expression_value = safe_eval(f_expr, globals_dict)
if expression_value != expected_value: # assertion failed
self.assertion_report.record_failure()
msg = 'assertion "%s" failed!\n' \
' xmltag: %s\n' \
' expected value: %r\n' \
' obtained value: %r\n' \
% (rec_string, etree.tostring(test), expected_value, expression_value)
_logger.error(msg)
return
else: # all tests were successful for this assertion tag (no break)
self.assertion_report.record_success()
def _tag_record(self, cr, rec, data_node=None, mode=None):
rec_model = rec.get("model").encode('ascii')
model = self.pool[rec_model]
rec_id = rec.get("id",'').encode('ascii')
rec_context = rec.get("context", {})
if rec_context:
rec_context = safe_eval(rec_context)
if self.xml_filename and rec_id:
rec_context['install_mode_data'] = dict(
xml_file=self.xml_filename,
xml_id=rec_id,
model=rec_model,
)
self._test_xml_id(rec_id)
        # in update mode, the record won't be updated if the data node explicitly
# opt-out using @noupdate="1". A second check will be performed in
# ir.model.data#_update() using the record's ir.model.data `noupdate` field.
if self.isnoupdate(data_node) and self.mode != 'init':
# check if the xml record has no id, skip
if not rec_id:
return None
if '.' in rec_id:
module,rec_id2 = rec_id.split('.')
else:
module = self.module
rec_id2 = rec_id
id = self.pool['ir.model.data']._update_dummy(cr, self.uid, rec_model, module, rec_id2)
if id:
# if the resource already exists, don't update it but store
# its database id (can be useful)
self.idref[rec_id] = int(id)
return None
elif not self.nodeattr2bool(rec, 'forcecreate', True):
# if it doesn't exist and we shouldn't create it, skip it
return None
# else create it normally
res = {}
for field in rec.findall('./field'):
#TODO: most of this code is duplicated above (in _eval_xml)...
f_name = field.get("name").encode('utf-8')
f_ref = field.get("ref",'').encode('utf-8')
f_search = field.get("search",'').encode('utf-8')
f_model = field.get("model",'').encode('utf-8')
if not f_model and f_name in model._fields:
f_model = model._fields[f_name].comodel_name
f_use = field.get("use",'').encode('utf-8') or 'id'
f_val = False
if f_search:
q = safe_eval(f_search, self.idref)
assert f_model, 'Define an attribute model="..." in your .XML file !'
f_obj = self.pool[f_model]
# browse the objects searched
s = f_obj.browse(cr, self.uid, f_obj.search(cr, self.uid, q))
# column definitions of the "local" object
_fields = self.pool[rec_model]._fields
# if the current field is many2many
if (f_name in _fields) and _fields[f_name].type == 'many2many':
f_val = [(6, 0, map(lambda x: x[f_use], s))]
elif len(s):
# otherwise (we are probably in a many2one field),
# take the first element of the search
f_val = s[0][f_use]
elif f_ref:
if f_name in model._fields and model._fields[f_name].type == 'reference':
val = self.model_id_get(cr, f_ref)
f_val = val[0] + ',' + str(val[1])
else:
f_val = self.id_get(cr, f_ref)
else:
f_val = _eval_xml(self,field, self.pool, cr, self.uid, self.idref)
if f_name in model._fields:
if model._fields[f_name].type == 'integer':
f_val = int(f_val)
res[f_name] = f_val
id = self.pool['ir.model.data']._update(cr, self.uid, rec_model, self.module, res, rec_id or False, not self.isnoupdate(data_node), noupdate=self.isnoupdate(data_node), mode=self.mode, context=rec_context )
if rec_id:
self.idref[rec_id] = int(id)
if config.get('import_partial'):
cr.commit()
return rec_model, id
def _tag_template(self, cr, el, data_node=None, mode=None):
# This helper transforms a <template> element into a <record> and forwards it
tpl_id = el.get('id', el.get('t-name', '')).encode('ascii')
full_tpl_id = tpl_id
if '.' not in full_tpl_id:
full_tpl_id = '%s.%s' % (self.module, tpl_id)
# set the full template name for qweb <module>.<id>
if not el.get('inherit_id'):
el.set('t-name', full_tpl_id)
el.tag = 't'
else:
el.tag = 'data'
el.attrib.pop('id', None)
record_attrs = {
'id': tpl_id,
'model': 'ir.ui.view',
}
for att in ['forcecreate', 'context']:
if att in el.keys():
record_attrs[att] = el.attrib.pop(att)
Field = builder.E.field
name = el.get('name', tpl_id)
record = etree.Element('record', attrib=record_attrs)
record.append(Field(name, name='name'))
record.append(Field(full_tpl_id, name='key'))
record.append(Field("qweb", name='type'))
if 'priority' in el.attrib:
record.append(Field(el.get('priority'), name='priority'))
if 'inherit_id' in el.attrib:
record.append(Field(name='inherit_id', ref=el.get('inherit_id')))
if 'website_id' in el.attrib:
record.append(Field(name='website_id', ref=el.get('website_id')))
if 'key' in el.attrib:
record.append(Field(el.get('key'), name='key'))
if el.get('active') in ("True", "False"):
view_id = self.id_get(cr, tpl_id, raise_if_not_found=False)
if mode != "update" or not view_id:
record.append(Field(name='active', eval=el.get('active')))
if el.get('customize_show') in ("True", "False"):
record.append(Field(name='customize_show', eval=el.get('customize_show')))
groups = el.attrib.pop('groups', None)
if groups:
grp_lst = map(lambda x: "ref('%s')" % x, groups.split(','))
record.append(Field(name="groups_id", eval="[(6, 0, ["+', '.join(grp_lst)+"])]"))
if el.attrib.pop('page', None) == 'True':
record.append(Field(name="page", eval="True"))
if el.get('primary') == 'True':
# Pseudo clone mode, we'll set the t-name to the full canonical xmlid
el.append(
builder.E.xpath(
builder.E.attribute(full_tpl_id, name='t-name'),
expr=".",
position="attributes",
)
)
record.append(Field('primary', name='mode'))
# inject complete <template> element (after changing node name) into
# the ``arch`` field
record.append(Field(el, name="arch", type="xml"))
return self._tag_record(cr, record, data_node)
def id_get(self, cr, id_str, raise_if_not_found=True):
if id_str in self.idref:
return self.idref[id_str]
res = self.model_id_get(cr, id_str, raise_if_not_found)
if res and len(res)>1: res = res[1]
return res
def model_id_get(self, cr, id_str, raise_if_not_found=True):
model_data_obj = self.pool['ir.model.data']
mod = self.module
if '.' not in id_str:
id_str = '%s.%s' % (mod, id_str)
return model_data_obj.xmlid_to_res_model_res_id(
cr, self.uid, id_str,
raise_if_not_found=raise_if_not_found)
def parse(self, de, mode=None):
roots = ['openerp','data','odoo']
if de.tag not in roots:
raise Exception("Root xml tag must be <openerp>, <odoo> or <data>.")
for rec in de:
if rec.tag in roots:
self.parse(rec, mode)
elif rec.tag in self._tags:
try:
self._tags[rec.tag](self.cr, rec, de, mode=mode)
except Exception, e:
self.cr.rollback()
exc_info = sys.exc_info()
raise ParseError, (misc.ustr(e), etree.tostring(rec).rstrip(), rec.getroottree().docinfo.URL, rec.sourceline), exc_info[2]
return True
def __init__(self, cr, module, idref, mode, report=None, noupdate=False, xml_filename=None):
self.mode = mode
self.module = module
self.cr = cr
self.idref = idref
self.pool = openerp.registry(cr.dbname)
self.uid = 1
if report is None:
report = assertion_report.assertion_report()
self.assertion_report = report
self.noupdate = noupdate
self.xml_filename = xml_filename
self._tags = {
'record': self._tag_record,
'delete': self._tag_delete,
'function': self._tag_function,
'menuitem': self._tag_menuitem,
'template': self._tag_template,
'workflow': self._tag_workflow,
'report': self._tag_report,
'ir_set': self._tag_ir_set, # deprecated:: 9.0
'act_window': self._tag_act_window,
'assert': self._tag_assert,
}
def convert_file(cr, module, filename, idref, mode='update', noupdate=False, kind=None, report=None, pathname=None):
if pathname is None:
pathname = os.path.join(module, filename)
fp = misc.file_open(pathname)
ext = os.path.splitext(filename)[1].lower()
try:
if ext == '.csv':
convert_csv_import(cr, module, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
convert_sql_import(cr, fp)
elif ext == '.yml':
convert_yaml_import(cr, module, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
convert_xml_import(cr, module, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
raise ValueError("Can't load unknown file type %s.", filename)
finally:
fp.close()
def convert_sql_import(cr, fp):
cr.execute(fp.read())
def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
noupdate=False):
'''Import csv file :
quote: "
delimiter: ,
encoding: utf-8'''
if not idref:
idref={}
model = ('.'.join(fname.split('.')[:-1]).split('-'))[0]
#remove folder path from model
head, model = os.path.split(model)
input = cStringIO.StringIO(csvcontent) #FIXME
reader = csv.reader(input, quotechar='"', delimiter=',')
fields = reader.next()
fname_partial = ""
if config.get('import_partial'):
fname_partial = module + '/'+ fname
if not os.path.isfile(config.get('import_partial')):
pickle.dump({}, file(config.get('import_partial'),'w+'))
else:
data = pickle.load(file(config.get('import_partial')))
if fname_partial in data:
if not data[fname_partial]:
return
else:
for i in range(data[fname_partial]):
reader.next()
if not (mode == 'init' or 'id' in fields):
_logger.error("Import specification does not contain 'id' and we are in init mode, Cannot continue.")
return
uid = 1
datas = []
for line in reader:
if not (line and any(line)):
continue
try:
datas.append(map(misc.ustr, line))
except:
_logger.error("Cannot import the line: %s", line)
registry = openerp.registry(cr.dbname)
result, rows, warning_msg, dummy = registry[model].import_data(cr, uid, fields, datas,mode, module, noupdate, filename=fname_partial)
if result < 0:
# Report failed import and abort module install
raise Exception(_('Module loading %s failed: file %s could not be processed:\n %s') % (module, fname, warning_msg))
if config.get('import_partial'):
data = pickle.load(file(config.get('import_partial')))
data[fname_partial] = 0
pickle.dump(data, file(config.get('import_partial'),'wb'))
cr.commit()
def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=False, report=None):
doc = etree.parse(xmlfile)
relaxng = etree.RelaxNG(
etree.parse(os.path.join(config['root_path'],'import_xml.rng' )))
try:
relaxng.assert_(doc)
except Exception:
_logger.info('The XML file does not fit the required schema !', exc_info=True)
_logger.info(misc.ustr(relaxng.error_log.last_error))
raise
if idref is None:
idref={}
if isinstance(xmlfile, file):
xml_filename = xmlfile.name
else:
xml_filename = xmlfile
obj = xml_import(cr, module, idref, mode, report=report, noupdate=noupdate, xml_filename=xml_filename)
obj.parse(doc.getroot(), mode=mode)
return True
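# --- Illustrative sketch (not part of the original module) -------------------
# A minimal data file of the kind convert_xml_import() consumes.  The element
# names map directly onto xml_import._tags: <record> -> _tag_record,
# <menuitem> -> _tag_menuitem, <function> -> _tag_function, and so on.  The
# model and field names below are made up; a real import also needs a database
# cursor and an installed module, e.g. convert_xml_import(cr, 'my_module', fp).
def _example_data_xml():
    """Illustrative only: parse a sample of the XML structure parse() walks."""
    sample = """
    <openerp>
        <data noupdate="1">
            <record id="partner_demo" model="res.partner">
                <field name="name">Demo Partner</field>
            </record>
            <menuitem id="menu_demo" name="Demo Menu" sequence="10"/>
        </data>
    </openerp>
    """
    root = etree.fromstring(sample)
    assert root.tag == 'openerp'
    return root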
| agpl-3.0 | -6,659,251,375,356,320,000 | 40.658092 | 219 | 0.527826 | false |
jralls/gramps | gramps/gen/plug/report/stdoptions.py | 2 | 14579 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2013 John Ralls <[email protected]>
# Copyright (C) 2013-2017 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Commonly used report options. Call the function, don't copy the code!
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...config import config
from ...datehandler import get_date_formats, LANG_TO_DISPLAY, main_locale
from ...display.name import displayer as global_name_display
from ...lib.date import Today
from ..menu import EnumeratedListOption, BooleanOption, NumberOption
from ...proxy import PrivateProxyDb, LivingProxyDb
from ...utils.grampslocale import GrampsLocale
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
def _T_(value): # enable deferred translations (see Python docs 22.1.3.4)
return value
#-------------------------------------------------------------------------
#
# StandardReportOptions
#
#-------------------------------------------------------------------------
def add_localization_option(menu, category):
"""
Insert an option for localizing the report into a different locale
from the UI locale.
"""
trans = EnumeratedListOption(_("Translation"),
glocale.DEFAULT_TRANSLATION_STR)
trans.add_item(glocale.DEFAULT_TRANSLATION_STR, _("Default"))
languages = glocale.get_language_dict()
for language in sorted(languages, key=glocale.sort_key):
trans.add_item(languages[language], language)
trans.set_help(_("The translation to be used for the report."))
menu.add_option(category, "trans", trans)
return trans
def add_name_format_option(menu, category):
"""
Insert an option for changing the report's name format to a
report-specific format instead of the user's Edit=>Preferences choice
"""
name_format = EnumeratedListOption(_("Name format"), 0)
name_format.add_item(0, _("Default"))
format_list = global_name_display.get_name_format()
for number, name, format_string, whether_active in format_list:
name_format.add_item(number, name)
name_format.set_help(_("Select the format to display names"))
current_format = config.get('preferences.name-format')
# if this report hasn't ever been run, start with user's current setting
name_format.set_value(current_format)
# if the report has been run, this line will get the user's old setting
menu.add_option(category, "name_format", name_format)
return name_format
def run_name_format_option(report, menu):
"""
Run the option for changing the report's name format to a
report-specific format instead of the user's Edit=>Preferences choice
"""
current_format = config.get('preferences.name-format')
name_format = menu.get_option_by_name("name_format").get_value()
if name_format != current_format:
report._name_display.set_default_format(name_format)
return name_format
def add_private_data_option(menu, category, default=True):
"""
Insert an option for deciding whether the information in the
database marked "private" shall be included in the report
Since historically, before this option, the entire database was
used, including private information, the default for this option
has been set to be True, to include such private information.
"""
incl_private = BooleanOption(_("Include data marked private"), default)
incl_private.set_help(_("Whether to include private data"))
menu.add_option(category, "incl_private", incl_private)
def run_private_data_option(report, menu):
"""
Run the option for deciding whether the information in the
database marked "private" shall be included in the report
"""
include_private_data = menu.get_option_by_name('incl_private').get_value()
if not include_private_data:
report.database = PrivateProxyDb(report.database)
def add_living_people_option(menu, category,
mode=LivingProxyDb.MODE_INCLUDE_ALL,
after_death_years=0,
process_names=True):
"""
Insert an option for deciding how the information in the
database for living people shall be handled by the report
Because historically, before this option, the entire database was
used, including all information on all living people, the default
mode for this option has been set to include all such information
The value of the "living_people" option is the same as the value
of the "mode" argument in the call to the LivingProxyDb proxy DB
:param menu: The menu the options should be added to.
:type menu: :class:`.Menu`
:param category: A label that describes the category that the option
belongs to.
Example: "Report Options"
:type category: string
:param mode:
The method for handling living people.
LivingProxyDb.MODE_EXCLUDE_ALL will remove living people altogether.
LivingProxyDb.MODE_INCLUDE_LAST_NAME_ONLY will remove all
information and change their given name to "[Living]" or whatever
has been set in Preferences -> Text -> Private given name.
LivingProxyDb.MODE_REPLACE_COMPLETE_NAME will remove all
information and change their given name and surname to
"[Living]" or whatever has been set in Preferences -> Text
for Private surname and Private given name.
LivingProxyDb.MODE_INCLUDE_FULL_NAME_ONLY will remove all
information but leave the entire name intact.
LivingProxyDb.MODE_INCLUDE_ALL will not invoke LivingProxyDb at all.
:type mode: int
:param after_death_years:
The number of years after a person's death to
still consider them as living.
:type after_death_years: int
    :param process_names: whether to offer name-oriented option choices
    :type process_names: Boolean
    :return: nothing
"""
def living_people_changed():
"""
Handle a change in the living_people option
"""
if living_people.get_value() == LivingProxyDb.MODE_INCLUDE_ALL:
years_past_death.set_available(False)
else:
years_past_death.set_available(True)
living_people = EnumeratedListOption(_("Living People"), mode)
items = [(LivingProxyDb.MODE_INCLUDE_ALL,
_T_("'living people'|Included, and all data"))]
if process_names:
items += [
(LivingProxyDb.MODE_INCLUDE_FULL_NAME_ONLY,
_T_("'living people'|Full names, but data removed")),
(LivingProxyDb.MODE_INCLUDE_LAST_NAME_ONLY,
_T_("'living people'|Given names replaced, and data removed")),
(LivingProxyDb.MODE_REPLACE_COMPLETE_NAME,
_T_("'living people'|Complete names replaced, and data removed"))]
items += [(LivingProxyDb.MODE_EXCLUDE_ALL,
_T_("'living people'|Not included"))]
living_people.set_items(items, xml_items=True) # for deferred translation
living_people.set_help(_("How to handle living people"))
menu.add_option(category, "living_people", living_people)
living_people.connect('value-changed', living_people_changed)
years_past_death = NumberOption(_("Years from death to consider living"),
after_death_years, 0, 100)
years_past_death.set_help(
_("Whether to restrict data on recently-dead people"))
menu.add_option(category, "years_past_death", years_past_death)
living_people_changed()
def run_living_people_option(report, menu, llocale=glocale):
"""
Run the option for deciding how the information in the
database for living people shall be handled by the report
If llocale is passed in (a :class:`.GrampsLocale`), then (insofar as
possible) the translated values will be returned instead.
:param llocale: allow deferred translation of "[Living]"
:type llocale: a :class:`.GrampsLocale` instance
"""
option = menu.get_option_by_name('living_people')
living_value = option.get_value()
years_past_death = menu.get_option_by_name('years_past_death').get_value()
if living_value != LivingProxyDb.MODE_INCLUDE_ALL:
report.database = LivingProxyDb(report.database, living_value,
years_after_death=years_past_death,
llocale=llocale)
return option
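# --- Illustrative sketch (not part of this module) ---------------------------
# How a report typically pairs the add_* and run_* helpers above: the options
# class adds the widgets, and the report applies them to the database it will
# read.  MyReportOptions/MyReport are hypothetical names.
#
#     class MyReportOptions(MenuReportOptions):
#         def add_menu_options(self, menu):
#             category = _("Report Options")
#             stdoptions.add_private_data_option(menu, category)
#             stdoptions.add_living_people_option(menu, category)
#
#     class MyReport(Report):
#         def __init__(self, database, options, user):
#             Report.__init__(self, database, options, user)
#             self.database = database
#             stdoptions.run_private_data_option(self, options.menu)
#             stdoptions.run_living_people_option(self, options.menu)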
def add_date_format_option(menu, category, localization_option):
"""
Insert an option for changing the report's date format to a
report-specific format instead of the user's Edit=>Preferences choice
:param localization_option: allow realtime translation of date formats
:type localization_option: a :class:`.EnumeratedListOption` instance
"""
def on_trans_value_changed():
"""
Handle a change in the localization option (inside the date-format one)
"""
lang = localization_option.get_value()
if lang == GrampsLocale.DEFAULT_TRANSLATION_STR: # the UI language
vlocale = glocale
elif lang in LANG_TO_DISPLAY: # a displayer exists
vlocale = LANG_TO_DISPLAY[lang]._locale # locale is already loaded
else: # no displayer
vlocale = GrampsLocale(lang=main_locale[lang])
ldd = vlocale.date_displayer
target_format_list = get_date_formats(vlocale) # get localized formats
# trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
trans_text = vlocale.translation.sgettext
if global_date_format < len(target_format_list):
ldd.set_format(global_date_format)
example = vlocale.get_date(today)
target_option_list = [
(0, "%s (%s) (%s)" % (trans_text("Default"),
target_format_list[global_date_format],
example))]
else:
target_option_list = [(0, trans_text("Default"))]
for fmt in target_format_list:
index = target_format_list.index(fmt) + 1 # option default = 0
ldd.set_format(index - 1)
example = vlocale.get_date(today)
target_option_list += [(index, fmt + ' (%s)' % example)]
date_option.set_items(target_option_list)
today = Today()
date_option = EnumeratedListOption(_("Date format"), 0)
global_date_format = config.get('preferences.date-format')
on_trans_value_changed()
date_option.set_help(_("The format and language for dates, with examples"))
# if this report hasn't ever been run, start with user's current setting
date_option.set_value(global_date_format + 1)
# if the report has been run, this line will get the user's old setting
menu.add_option(category, 'date_format', date_option)
localization_option.connect('value-changed', on_trans_value_changed)
def run_date_format_option(report, menu):
"""
Run the option for changing the report's date format to a
report-specific format instead of the user's Edit=>Preferences choice
"""
def warning(value):
"""
Convenience function for the error message.
The 'value' (starting at '0') is the index in the option choices list
(as opposed to the index (starting at '0') in the target format list,
which is shorter by one, so corresponding offsets are made).
"""
target_format_choices = date_option.get_items() # with numbers
report._user.warn(
_("Ignoring unknown option: %s") % value,
_("Valid values: ") + str(target_format_choices)
+ '\n\n' +
_("Using options string: %s") % str(target_format_list[0])
) # the previous line's "0" is because ISO is always the fallback
target_format_list = get_date_formats(report._locale)
date_option = menu.get_option_by_name('date_format')
date_opt_value = date_option.get_value()
if date_opt_value == 0: # "default"
format_to_be = config.get('preferences.date-format') # the UI choice
elif date_opt_value <= len(target_format_list):
format_to_be = date_opt_value - 1
else:
warning(date_opt_value)
format_to_be = 0 # ISO always exists
if format_to_be + 1 > len(target_format_list):
warning(format_to_be + 1)
format_to_be = 0 # ISO always exists
report._ldd.set_format(format_to_be)
def add_gramps_id_option(menu, category, ownline=False):
"""
Insert an option for deciding whether to include Gramps IDs
in the report
Since for some reports it makes sense to possibly have the ID on its
own line (e.g. Graphviz reports), that possibility is included, but
since for most reports it won't make sense the default is False
:param menu: The menu the options should be added to.
:type menu: :class:`.Menu`
:param category: A label that describes the category that the option
belongs to, e.g. "Report Options"
:type category: string
:param ownline: whether the option offers to have the ID on its own line
:type ownline: Boolean
"""
include_id = EnumeratedListOption(_('Gramps ID'), 0)
include_id.add_item(0, _('Do not include'))
if ownline:
include_id.add_item(1, _('Share an existing line'))
include_id.add_item(2, _('On a line of its own'))
include_id.set_help(_('Whether (and where) to include Gramps IDs'))
else:
include_id.add_item(1, _('Include'))
include_id.set_help(_("Whether to include Gramps IDs"))
menu.add_option(category, 'inc_id', include_id)
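# --- Illustrative sketch (not part of this module) ---------------------------
# add_date_format_option() needs the EnumeratedListOption returned by
# add_localization_option() so that the date-format examples are re-localized
# whenever the user changes the report language.  A typical wiring, inside a
# hypothetical options class, looks like:
#
#     def add_menu_options(self, menu):
#         category = _("Report Options (2)")
#         stdoptions.add_name_format_option(menu, category)
#         locale_opt = stdoptions.add_localization_option(menu, category)
#         stdoptions.add_date_format_option(menu, category, locale_opt)
#         stdoptions.add_gramps_id_option(menu, category)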
| gpl-2.0 | 2,420,177,266,173,407,700 | 43.31307 | 79 | 0.651211 | false |
archetipo/server-tools | base_external_dbsource/base_external_dbsource.py | 2 | 6784 | # -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis
# 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import logging
import psycopg2
from openerp.osv import orm, fields
from openerp.tools.translate import _
import openerp.tools as tools
_logger = logging.getLogger(__name__)
CONNECTORS = []
try:
import sqlalchemy
CONNECTORS.append(('sqlite', 'SQLite'))
try:
import pymssql
CONNECTORS.append(('mssql', 'Microsoft SQL Server'))
assert pymssql
except (ImportError, AssertionError):
        _logger.info('MS SQL Server not available. Please install '
                     '"pymssql" python package.')
try:
import MySQLdb
CONNECTORS.append(('mysql', 'MySQL'))
assert MySQLdb
except (ImportError, AssertionError):
        _logger.info('MySQL not available. Please install "mysqldb" '
                     'python package.')
except:
    _logger.info('SQL Alchemy not available. Please install "sqlalchemy" '
                 'python package.')
try:
import pyodbc
CONNECTORS.append(('pyodbc', 'ODBC'))
except:
    _logger.info('ODBC libraries not available. Please install "unixodbc" '
                 'and "python-pyodbc" packages.')
try:
import cx_Oracle
CONNECTORS.append(('cx_Oracle', 'Oracle'))
except:
    _logger.info('Oracle libraries not available. Please install "cx_Oracle" '
                 'python package.')
CONNECTORS.append(('postgresql', 'PostgreSQL'))
class base_external_dbsource(orm.Model):
_name = "base.external.dbsource"
_description = 'External Database Sources'
_columns = {
'name': fields.char('Datasource name', required=True, size=64),
'conn_string': fields.text('Connection string', help="""
Sample connection strings:
- Microsoft SQL Server:
mssql+pymssql://username:%s@server:port/dbname?charset=utf8
- MySQL: mysql://user:%s@server:port/dbname
- ODBC: DRIVER={FreeTDS};SERVER=server.address;Database=mydb;UID=sa
- ORACLE: username/%s@//server.address:port/instance
- PostgreSQL:
dbname='template1' user='dbuser' host='localhost' port='5432' password=%s
- SQLite: sqlite:///test.db
"""),
'password': fields.char('Password', size=40),
'connector': fields.selection(CONNECTORS, 'Connector',
required=True,
help="If a connector is missing from the\
list, check the server log to confirm\
that the required components were\
detected."),
}
def conn_open(self, cr, uid, id1):
# Get dbsource record
data = self.browse(cr, uid, id1)
# Build the full connection string
connStr = data.conn_string
if data.password:
if '%s' not in data.conn_string:
connStr += ';PWD=%s'
connStr = connStr % data.password
# Try to connect
if data.connector == 'cx_Oracle':
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.UTF8'
conn = cx_Oracle.connect(connStr)
elif data.connector == 'pyodbc':
conn = pyodbc.connect(connStr)
elif data.connector in ('sqlite', 'mysql', 'mssql'):
conn = sqlalchemy.create_engine(connStr).connect()
elif data.connector == 'postgresql':
conn = psycopg2.connect(connStr)
return conn
def execute(self, cr, uid, ids, sqlquery, sqlparams=None, metadata=False,
context=None):
"""Executes SQL and returns a list of rows.
"sqlparams" can be a dict of values, that can be referenced in
the SQL statement using "%(key)s" or, in the case of Oracle,
":key".
Example:
sqlquery = "select * from mytable where city = %(city)s and
date > %(dt)s"
params = {'city': 'Lisbon',
'dt': datetime.datetime(2000, 12, 31)}
If metadata=True, it will instead return a dict containing the
rows list and the columns list, in the format:
{ 'cols': [ 'col_a', 'col_b', ...]
, 'rows': [ (a0, b0, ...), (a1, b1, ...), ...] }
"""
data = self.browse(cr, uid, ids)
rows, cols = list(), list()
for obj in data:
conn = self.conn_open(cr, uid, obj.id)
if obj.connector in ["sqlite", "mysql", "mssql"]:
# using sqlalchemy
cur = conn.execute(sqlquery, sqlparams)
if metadata:
cols = cur.keys()
rows = [r for r in cur]
else:
# using other db connectors
cur = conn.cursor()
cur.execute(sqlquery, sqlparams)
if metadata:
cols = [d[0] for d in cur.description]
rows = cur.fetchall()
conn.close()
if metadata:
return{'cols': cols, 'rows': rows}
else:
return rows
def connection_test(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context):
conn = False
try:
conn = self.conn_open(cr, uid, obj.id)
except Exception as e:
raise orm.except_orm(_("Connection test failed!"),
_("Here is what we got instead:\n %s")
% tools.ustr(e))
finally:
try:
if conn:
conn.close()
except Exception:
# ignored, just a consequence of the previous exception
pass
# TODO: if OK a (wizard) message box should be displayed
raise orm.except_orm(_("Connection test succeeded!"),
_("Everything seems properly set up!"))
| agpl-3.0 | -7,047,281,655,317,171,000 | 36.899441 | 79 | 0.544664 | false |
birkin/channels_exploration_project | primes_app/views.py | 1 | 1127 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime, logging
from channels import Channel
from django.http import HttpResponse
log = logging.getLogger(__name__)
def hi( request ):
""" Returns simplest response. """
now = datetime.datetime.now()
log.debug( 'now, `{}`'.format(now) )
rsp = HttpResponse( '<p>hi</p> <p>( {} )</p>'.format(now) )
log.debug( 'rsp, `{}`'.format(rsp) )
try:
return rsp
except Exception as e:
log.error( 'exception, ```{}```'.format(unicode(repr(e))) )
def regular_is_prime( request ):
log.debug( 'starting response_regular()' )
param = request.GET.get( 'foo', '' )
if param:
response_string = 'regular response -- {}'.format( param )
else:
response_string = 'regular response'
return HttpResponse( response_string )
def channels_is_prime( request ):
log.debug( 'starting response_channels()' )
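    # NOTE: the early return below short-circuits this view for debugging;
    # the Channel send and the lines after it are currently unreachable.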
return HttpResponse( 'zz' )
result = Channel('make-response').send( {'foo': 'bar'} )
log.debug( 'type(result), `{}`'.format(type(result)) )
return HttpResponse( result )
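# --- Illustrative sketch (not from this project) ------------------------------
# channels_is_prime() above pushes a message onto the custom 'make-response'
# channel; something has to be routed to that channel to consume it.  Under the
# Channels 1.x API used here, a hypothetical consumer could look like this:
def make_response_consumer( message ):
    """ Hypothetical consumer for the 'make-response' channel. """
    log.debug( 'consumer received, `{}`'.format(message.content) )
    # the actual primality work would happen here
# The routing entry is assumed, not taken from this repo; e.g. in routing.py:
#     channel_routing = { 'make-response': 'primes_app.views.make_response_consumer' }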
| mit | -139,578,035,403,708,100 | 27.897436 | 67 | 0.61402 | false |
LandRegistry-Attic/matching-alpha | matching/models.py | 1 | 1689 | from sqlalchemy.dialects.postgresql import TEXT
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.dialects.postgresql import BOOLEAN
from sqlalchemy.types import Enum
from matching import db
roles_users = db.Table('roles_users',
        db.Column('user_lrid', UUID(as_uuid=True), db.ForeignKey('users.lrid')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
def __eq__(self, other):
return (self.name == other or
self.name == getattr(other, 'name', None))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str({
'name': self.name
})
class User(db.Model):
__tablename__ = 'users'
lrid = db.Column(UUID(as_uuid=True), nullable=False, primary_key=True)
name = db.Column(TEXT, nullable=False)
date_of_birth = db.Column(db.Date(), nullable=False)
gender = db.Column(Enum('F', 'M', name='gender_types'), nullable=False)
current_address = db.Column(TEXT, nullable=False)
previous_address = db.Column(TEXT, nullable=False)
roles = db.relationship('Role', secondary=roles_users,
backref=db.backref('users', lazy='dynamic'))
def __repr__(self):
return str({
'lrid': self.lrid,
'name': self.name,
'date of birth': self.date_of_birth,
'gender': self.gender,
'current address': self.current_address,
'previous address': self.previous_address,
'roles': self.roles
})
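# --- Illustrative sketch (not part of the original module) -------------------
# Assuming the Flask app bound to `db` is configured elsewhere, the
# many-to-many link declared above is typically exercised like this
# (all values below are made up):
#
#     import uuid, datetime
#     buyer = Role(name='buyer')
#     citizen = User(lrid=uuid.uuid4(), name='Jane Doe',
#                    date_of_birth=datetime.date(1970, 1, 1), gender='F',
#                    current_address='1 Main Street',
#                    previous_address='2 Side Street', roles=[buyer])
#     db.session.add(citizen)
#     db.session.commit()
#     assert citizen in buyer.users.all()   # via the 'users' backref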
| mit | 2,334,935,415,609,113,600 | 32.117647 | 79 | 0.606276 | false |
shaunstanislaus/grab | grab/base.py | 8 | 23594 | # -*- coding: utf-8 -*-
# Copyright: 2011, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://lorien.name)
# License: BSD
"""
The core of grab package: the Grab class.
"""
from __future__ import absolute_import
import logging
import os
from random import randint
from copy import copy, deepcopy
import threading
import itertools
import collections
from six.moves.urllib.parse import urljoin
import email
from datetime import datetime
import weakref
import six
from weblib.html import find_refresh_url, find_base_url
from grab.document import Document
from grab import error
from weblib.http import normalize_http_values
from grab.cookie import CookieManager
from grab.proxylist import ProxyList, parse_proxy_line
from grab.deprecated import DeprecatedThings
__all__ = ('Grab',)
# This counter will be used in enumerating network queries.
# Its value will be displayed in logging messages and also used
# in names of dumps
# I use mutable module variable to allow different
# instances of Grab to maintain single counter
# This could be helpful in debugging when your script
# creates multiple Grab instances - in case of shared counter
# grab instances do not overwrite dump logs
REQUEST_COUNTER = itertools.count(1)
GLOBAL_STATE = {
'dom_build_time': 0,
}
MUTABLE_CONFIG_KEYS = ['post', 'multipart_post', 'headers', 'cookies']
TRANSPORT_CACHE = {}
logger = logging.getLogger('grab.base')
# Logger to handle network activity
# It is done as separate logger to allow you easily
# control network logging separately from other grab logs
logger_network = logging.getLogger('grab.network')
def reset_request_counter():
global REQUEST_COUNTER
REQUEST_COUNTER = itertools.count(1)
def copy_config(config, mutable_config_keys=MUTABLE_CONFIG_KEYS):
"""
Copy grab config with correct handling of mutable config values.
"""
cloned_config = copy(config)
# Apply ``copy`` function to mutable config values
for key in mutable_config_keys:
cloned_config[key] = copy(config[key])
return cloned_config
def default_config():
# TODO: Maybe config should be splitted into two entities:
# 1) config which is not changed during request
# 2) changeable settings
return dict(
# Common
url=None,
# Debugging
log_file=None,
log_dir=False,
debug_post=False,
debug_post_limit=150,
# Only for curl transport
debug=False,
verbose_logging=False,
# Only for selenium transport
webdriver='firefox',
selenium_wait=1, # in seconds
# Proxy
proxy=None,
proxy_type=None,
proxy_userpwd=None,
proxy_auto_change=True,
# Method, Post
method=None,
post=None,
multipart_post=None,
# Headers, User-Agent, Referer
headers={},
common_headers={},
user_agent=None,
user_agent_file=None,
referer=None,
reuse_referer=True,
# Cookies
cookies={},
reuse_cookies=True,
cookiefile=None,
# Timeouts
timeout=15,
connect_timeout=3,
# Connection
connection_reuse=True,
# Response processing
nobody=False,
body_maxsize=None,
body_inmemory=True,
body_storage_dir=None,
body_storage_filename=None,
body_storage_create_dir=False,
reject_file_size=None,
# Content compression
encoding='gzip',
# Network interface
interface=None,
# Redirects
follow_refresh=False,
follow_location=True,
refresh_redirect_count=0,
redirect_limit=10,
# Authentication
userpwd=None,
# Character set to which any unicode data should be encoded
# before get placed in request
# This setting is overwritten after each request with
# charset of retrieved document
charset='utf-8',
# Charset to use for converting content of response
# into unicode, by default it is detected automatically
document_charset=None,
# Content type control how DOM are built
# For html type HTML DOM builder is used
# For xml type XML DOM builder is used
content_type='html',
# Fix &#X; entities, where X between 128 and 160
# Such entities are parsed by modern browsers as
# windows-1251 entities independently of the real charset of
        # the document. If this option is True then such entities
        # will be replaced with correct unicode entities e.g.:
        #   &#151; -> —
fix_special_entities=True,
# Convert document body to lower case before building LXML tree
# It does not affect `self.doc.body`
lowercased_tree=False,
# Strip null bytes from document body before building lXML tree
# It does not affect `self.doc.body`
strip_null_bytes=True,
# Internal object to store
state={},
)
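# Every key in the dict above can be overridden per instance, either through
# the Grab() constructor or later via setup(); an illustrative call (values
# are arbitrary) would be:
#     g = Grab(timeout=30, headers={'X-Requested-With': 'XMLHttpRequest'})
#     g.setup(proxy='127.0.0.1:3128', proxy_type='http')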
class Grab(DeprecatedThings):
__slots__ = ('request_head', 'request_log', 'request_body',
'proxylist', 'config',
'transport',
'transport_param', 'request_method', 'request_counter',
'__weakref__', 'cookies',
'meta',
# Dirty hack to make it possible to inherit Grab from
# multiple base classes with __slots__
'_doc',
)
# Attributes which should be processed when clone
# of Grab instance is creating
clonable_attributes = ('request_head', 'request_log', 'request_body',
'proxylist')
# Complex config items which points to mutable objects
mutable_config_keys = copy(MUTABLE_CONFIG_KEYS)
"""
Public methods
"""
def __init__(self, document_body=None,
transport='grab.transport.curl.CurlTransport', **kwargs):
"""
Create Grab instance
"""
self.meta = {}
self._doc = None
self.config = default_config()
self.config['common_headers'] = self.common_headers()
self.cookies = CookieManager()
self.proxylist = ProxyList()
self.setup_transport(transport)
self.reset()
if kwargs:
self.setup(**kwargs)
if document_body is not None:
self.setup_document(document_body)
def _get_doc(self):
if self._doc is None:
self._doc = Document(self)
return self._doc
def _set_doc(self, obj):
self._doc = obj
doc = property(_get_doc, _set_doc)
def setup_transport(self, transport_param):
self.transport_param = transport_param
if isinstance(transport_param, six.string_types):
mod_path, cls_name = transport_param.rsplit('.', 1)
try:
cls = TRANSPORT_CACHE[(mod_path, cls_name)]
except KeyError:
mod = __import__(mod_path, globals(), locals(), ['foo'])
cls = getattr(mod, cls_name)
TRANSPORT_CACHE[(mod_path, cls_name)] = cls
self.transport = cls()
elif isinstance(transport_param, collections.Callable):
self.transport = transport_param()
else:
raise error.GrabMisuseError('Option `transport` should be string '
'or callable. Got %s'
% type(transport_param))
def reset(self):
"""
        Reset all attributes which could be modified during the previous request
        or which are not yet initialized if this is a new Grab instance.
        This method is automatically called before each network request.
"""
self.request_head = None
self.request_log = None
self.request_body = None
self.request_method = None
self.transport.reset()
def clone(self, **kwargs):
"""
Create clone of Grab instance.
Cloned instance will have the same state: cookies, referrer, response
document data
:param **kwargs: overrides settings of cloned grab instance
"""
g = Grab(transport=self.transport_param)
g.config = self.dump_config()
g.doc = self.doc.copy()
g.doc.grab = weakref.proxy(g)
for key in self.clonable_attributes:
setattr(g, key, getattr(self, key))
g.cookies = deepcopy(self.cookies)
if kwargs:
g.setup(**kwargs)
return g
def adopt(self, g):
"""
Copy the state of another `Grab` instance.
Use case: create backup of current state to the cloned instance and
then restore the state from it.
"""
self.load_config(g.config)
self.doc = g.doc.copy(new_grab=self)
for key in self.clonable_attributes:
setattr(self, key, getattr(g, key))
self.cookies = deepcopy(g.cookies)
def dump_config(self):
"""
Make clone of current config.
"""
conf = copy_config(self.config, self.mutable_config_keys)
conf['state'] = {
'cookiejar_cookies': list(self.cookies.cookiejar),
}
return conf
def load_config(self, config):
"""
Configure grab instance with external config object.
"""
self.config = copy_config(config, self.mutable_config_keys)
if 'cookiejar_cookies' in config['state']:
self.cookies = CookieManager.from_cookie_list(
config['state']['cookiejar_cookies'])
def setup(self, **kwargs):
"""
Setting up Grab instance configuration.
"""
if 'hammer_mode' in kwargs:
logger.error('Option hammer_mode is deprecated. Grab does not '
'support hammer mode anymore.')
del kwargs['hammer_mode']
if 'hammer_timeouts' in kwargs:
logger.error('Option hammer_timeouts is deprecated. Grab does not'
' support hammer mode anymore.')
del kwargs['hammer_timeouts']
for key in kwargs:
if key not in self.config.keys():
raise error.GrabMisuseError('Unknown option: %s' % key)
if 'url' in kwargs:
if self.config.get('url'):
kwargs['url'] = self.make_url_absolute(kwargs['url'])
self.config.update(kwargs)
def go(self, url, **kwargs):
"""
Go to ``url``
Args:
            :url: could be absolute or relative. If relative then it will be
                resolved against the absolute URL of the previous request.
"""
return self.request(url=url, **kwargs)
def download(self, url, location, **kwargs):
"""
        Fetch document located at ``url`` and save it to ``location``.
"""
doc = self.go(url, **kwargs)
with open(location, 'wb') as out:
out.write(doc.body)
return len(doc.body)
def prepare_request(self, **kwargs):
"""
Configure all things to make real network request.
This method is called before doing real request via
transport extension.
"""
self.reset()
self.request_counter = next(REQUEST_COUNTER)
if kwargs:
self.setup(**kwargs)
if self.proxylist.size() and self.config['proxy_auto_change']:
self.change_proxy()
self.request_method = self.detect_request_method()
self.transport.process_config(self)
def log_request(self, extra=''):
"""
Send request details to logging system.
"""
thread_name = threading.currentThread().getName().lower()
if thread_name == 'mainthread':
thread_name = ''
else:
thread_name = '-%s' % thread_name
if self.config['proxy']:
if self.config['proxy_userpwd']:
auth = ' with authorization'
else:
auth = ''
proxy_info = ' via %s proxy of type %s%s' % (
self.config['proxy'], self.config['proxy_type'], auth)
else:
proxy_info = ''
if extra:
extra = '[%s] ' % extra
logger_network.debug('[%02d%s] %s%s %s%s',
self.request_counter, thread_name,
extra, self.request_method or 'GET',
self.config['url'], proxy_info)
def request(self, **kwargs):
"""
Perform network request.
You can specify grab settings in ``**kwargs``.
Any keyword argument will be passed to ``self.config``.
        Returns: ``Document`` object.
"""
self.prepare_request(**kwargs)
self.log_request()
try:
self.transport.request()
except error.GrabError:
self.reset_temporary_options()
self.save_failed_dump()
raise
else:
# That builds `self.doc`
self.process_request_result()
return self.doc
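    # Illustrative usage (not part of the original source): a typical fetch is
    #     g = Grab()
    #     doc = g.go('http://example.com/')   # go() is a thin wrapper around request()
    #     print(doc.code, len(doc.body))
    # where `doc` is the same Document instance that is stored on g.doc.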
def process_request_result(self, prepare_response_func=None):
"""
Process result of real request performed via transport extension.
"""
now = datetime.utcnow()
# TODO: move into separate method
if self.config['debug_post']:
post = self.config['post'] or self.config['multipart_post']
if isinstance(post, dict):
post = list(post.items())
if post:
if isinstance(post, six.string_types):
post = post[:self.config['debug_post_limit']] + '...'
else:
items = normalize_http_values(post, charset='utf-8')
new_items = []
for key, value in items:
if len(value) > self.config['debug_post_limit']:
value = value[
:self.config['debug_post_limit']] + '...'
else:
value = value
new_items.append((key, value))
post = '\n'.join('%-25s: %s' % x for x in new_items)
if post:
logger_network.debug('[%02d] POST request:\n%s\n'
% (self.request_counter, post))
# It's important to delete old POST data after request is performed.
# If POST data is not cleared then next request will try to use them
# again!
old_refresh_count = self.config['refresh_redirect_count']
self.reset_temporary_options()
if prepare_response_func:
self.doc = prepare_response_func(self.transport, self)
else:
self.doc = self.transport.prepare_response(self)
# Workaround
if self.doc.grab is None:
self.doc.grab = weakref.proxy(self)
if self.config['reuse_cookies']:
self.cookies.update(self.doc.cookies)
self.doc.timestamp = now
self.config['charset'] = self.doc.charset
if self.config['log_file']:
with open(self.config['log_file'], 'wb') as out:
out.write(self.doc.body)
if self.config['cookiefile']:
self.cookies.save_to_file(self.config['cookiefile'])
if self.config['reuse_referer']:
self.config['referer'] = self.doc.url
self.copy_request_data()
# Should be called after `copy_request_data`
self.save_dumps()
# TODO: check max redirect count
if self.config['follow_refresh']:
url = find_refresh_url(self.doc.unicode_body())
if url is not None:
inc_count = old_refresh_count + 1
if inc_count > self.config['redirect_limit']:
raise error.GrabTooManyRedirectsError()
else:
return self.request(url=url,
refresh_redirect_count=inc_count)
return None
def reset_temporary_options(self):
self.config['post'] = None
self.config['multipart_post'] = None
self.config['method'] = None
self.config['body_storage_filename'] = None
self.config['refresh_redirect_count'] = 0
def save_failed_dump(self):
"""
Save dump of failed request for debugging.
        This method is called when a fatal network exception is raised.
The saved dump could be used for debugging the reason of the failure.
"""
# This is very untested feature, so
# I put it inside try/except to not break
# live spiders
try:
self.doc = self.transport.prepare_response(self)
self.copy_request_data()
self.save_dumps()
except Exception as ex:
logger.error(six.text_type(ex))
def copy_request_data(self):
# TODO: Maybe request object?
self.request_head = self.transport.request_head
self.request_body = self.transport.request_body
self.request_log = self.transport.request_log
def setup_document(self, content, **kwargs):
"""
Setup `response` object without real network requests.
        Useful for testing and debugging.
All ``**kwargs`` will be passed to `Document` constructor.
"""
self.reset()
if isinstance(content, six.text_type):
raise error.GrabMisuseError('Method `setup_document` accepts only '
'byte string in `content` argument.')
# Configure Document instance
doc = Document(grab=self)
doc.body = content
doc.status = ''
doc.head = ''
doc.parse(charset=kwargs.get('document_charset'))
doc.code = 200
doc.total_time = 0
doc.connect_time = 0
doc.name_lookup_time = 0
doc.url = ''
for key, value in kwargs.items():
setattr(doc, key, value)
self.doc = doc
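    # Illustrative usage (not part of the original source): a canned byte
    # string lets parsing be exercised without any network traffic, e.g.
    #     g = Grab()
    #     g.setup_document(b'<html><body><h1>hi</h1></body></html>')
    #     assert b'<h1>' in g.doc.body
    # which is equivalent to passing document_body=... to the constructor.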
def change_proxy(self):
"""
Set random proxy from proxylist.
"""
if self.proxylist.size():
proxy = self.proxylist.get_random_proxy()
self.setup(proxy=proxy.get_address(),
proxy_userpwd=proxy.get_userpwd(),
proxy_type=proxy.proxy_type)
else:
logger.debug('Proxy list is empty')
def use_next_proxy(self):
"""
Set next proxy from proxylist.
"""
if self.proxylist.size():
proxy = self.proxylist.get_next_proxy()
self.setup(proxy=proxy.get_address(),
proxy_userpwd=proxy.get_userpwd(),
proxy_type=proxy.proxy_type)
else:
logger.debug('Proxy list is empty')
"""
Private methods
"""
def common_headers(self):
"""
Build headers which sends typical browser.
"""
return {
'Accept': 'text/xml,application/xml,application/xhtml+xml'
',text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.%d'
% randint(2, 5),
'Accept-Language': 'en-us,en;q=0.%d' % (randint(5, 9)),
'Accept-Charset': 'utf-8,windows-1251;q=0.7,*;q=0.%d'
% randint(5, 7),
'Keep-Alive': '300',
'Expect': '',
}
def save_dumps(self):
if self.config['log_dir']:
thread_name = threading.currentThread().getName().lower()
if thread_name == 'mainthread':
thread_name = ''
else:
thread_name = '-%s' % thread_name
file_name = os.path.join(self.config['log_dir'], '%02d%s.log' % (
self.request_counter, thread_name))
with open(file_name, 'w') as out:
out.write('Request headers:\n')
out.write(self.request_head)
out.write('\n')
out.write('Request body:\n')
out.write(self.request_body)
out.write('\n\n')
out.write('Response headers:\n')
out.write(self.doc.head)
file_extension = 'html'
file_name = os.path.join(self.config['log_dir'], '%02d%s.%s' % (
self.request_counter, thread_name, file_extension))
self.doc.save(file_name)
def make_url_absolute(self, url, resolve_base=False):
"""
Make url absolute using previous request url as base url.
"""
if self.config['url']:
if resolve_base:
ubody = self.doc.unicode_body()
base_url = find_base_url(ubody)
if base_url:
return urljoin(base_url, url)
return urljoin(self.config['url'], url)
else:
return url
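    # Illustration of make_url_absolute() with made-up URLs; resolution
    # follows urljoin semantics:
    #
    #     g.config['url'] = 'http://example.com/catalog/page-1.html'
    #     g.make_url_absolute('/about')       # -> 'http://example.com/about'
    #     g.make_url_absolute('page-2.html')  # -> 'http://example.com/catalog/page-2.html'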
def detect_request_method(self):
"""
Analyze request config and find which
request method will be used.
Returns request method in upper case
This method needs simetime when `process_config` method
was not called yet.
"""
method = self.config['method']
if method:
method = method.upper()
else:
if self.config['post'] or self.config['multipart_post']:
method = 'POST'
else:
method = 'GET'
return method
def clear_cookies(self):
"""
Clear all remembered cookies.
"""
self.config['cookies'] = {}
self.cookies.clear()
def setup_with_proxyline(self, line, proxy_type='http'):
# TODO: remove from base class
# maybe to proxylist?
host, port, user, pwd = parse_proxy_line(line)
server_port = '%s:%s' % (host, port)
self.setup(proxy=server_port, proxy_type=proxy_type)
if user:
userpwd = '%s:%s' % (user, pwd)
self.setup(proxy_userpwd=userpwd)
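    # Example of the proxy line format handled by setup_with_proxyline()
    # (the address and credentials are made up for illustration):
    #
    #     g.setup_with_proxyline('127.0.0.1:8080:user:secret', proxy_type='socks5')
    #     # which is roughly equivalent to:
    #     g.setup(proxy='127.0.0.1:8080', proxy_type='socks5',
    #             proxy_userpwd='user:secret')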
def __getstate__(self):
"""
Reset cached lxml objects which could not be pickled.
"""
state = {}
for cls in type(self).mro():
cls_slots = getattr(cls, '__slots__', ())
for slot in cls_slots:
if slot != '__weakref__':
if hasattr(self, slot):
state[slot] = getattr(self, slot)
if state['_doc']:
state['_doc'].grab = weakref.proxy(self)
return state
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@property
def request_headers(self):
"""
Temporary hack till the time I'll understand
where to store request details.
"""
try:
first_head = self.request_head.split('\r\n\r\n')[0]
lines = first_head.split('\r\n')
lines = [x for x in lines if ':' in x]
headers = email.message_from_string('\n'.join(lines))
return headers
except Exception as ex:
logger.error('Could not parse request headers', exc_info=ex)
return {}
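    # Sketch of how request_headers behaves, assuming request_head holds a raw
    # HTTP request head such as 'GET / HTTP/1.1\r\nHost: example.com\r\n...':
    #
    #     headers = g.request_headers
    #     headers['Host']  # -> 'example.com' (email.message.Message-style access)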
def dump(self):
"""
Shortcut for real-time debugging.
"""
self.doc.save('/tmp/x.html')
# For backward compatibility with older code that imports BaseGrab.
BaseGrab = Grab
| mit | 6,771,816,484,406,206,000 | 30.126649 | 79 | 0.554887 | false |
ihaveamac/Kurisu | cogs/mod_db.py | 1 | 1934 | from discord.ext import commands
from utils.checks import is_staff
from utils import crud
class ModDB(commands.Cog):
"""
Database management commands.
"""
NOT_FOUND = 'Flag was not found in the database. ⚠️'
def __init__(self, bot):
self.bot = bot
async def cog_check(self, ctx):
if ctx.guild is None:
raise commands.NoPrivateMessage()
return True
@is_staff('Owner')
@commands.command()
async def addflag(self, ctx, name):
"""Adds a config flag to the database. Owners only."""
if await crud.get_flag(name) is None:
await crud.add_flag(name)
await ctx.send('Flag added to the database. ✅')
else:
await ctx.send('Flag already exists in the database. ⚠️')
@is_staff('Owner')
@commands.command()
async def delflag(self, ctx, name):
"""Removes a config flag from the database. Owners only."""
if await crud.get_flag(name) is not None:
await crud.remove_flag(name)
await ctx.send('Flag removed from the database. ✅')
else:
await ctx.send(self.NOT_FOUND)
@is_staff('Owner')
@commands.command()
async def getflag(self, ctx, name):
"""Retrieves a config flag from the database. Owners only."""
value = await crud.get_flag(name)
if value is not None:
await ctx.send(f'{name} is set to: {value}.')
else:
await ctx.send(self.NOT_FOUND)
@is_staff('Owner')
@commands.command()
    async def setflag(self, ctx, name, value: bool):
"""Sets a config flag in the database. Owners only."""
if await crud.get_flag(name) is not None:
await crud.set_flag(name, value)
await ctx.send("Flag's value was set. ✅")
else:
await ctx.send(self.NOT_FOUND)
def setup(bot):
bot.add_cog(ModDB(bot))
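# Hypothetical interaction sketch (the command prefix and flag name are
# illustrative, not taken from the bot's configuration):
#
#   .addflag auto_probation       -> "Flag added to the database. ✅"
#   .setflag auto_probation True  -> "Flag's value was set. ✅"
#   .getflag auto_probation       -> "auto_probation is set to: True."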
| apache-2.0 | 2,232,703,627,020,649,500 | 29.47619 | 69 | 0.589063 | false |
SherSingh07/albums | gallery/urls.py | 1 | 1586 | """gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
import album.views as album_views
import reg.views as reg_views
import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', album_views.home, name='home'),
url(r'^add_album/$', album_views.add_album, name='add_album'),
url(r'^save_album/$', album_views.save_album, name='save_album'),
url(r'^show_album/(?P<pk>[0-9]+)/$', album_views.show_album),
# login/logout
url(r'^login/$', album_views.login_page, name='login_page'),
url(r'^logout/$', album_views.logout_page, name='logout_page'),
url(r'^accounts/logout/$', album_views.logout_page, name='logout_page'),
url(r'^accounts/login/$', album_views.login_page, name='login_page'),
# registration
url(r'^register/$', reg_views.regform, name='regform'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
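# Illustrative request routing for the patterns above (paths are examples,
# assuming the imported views exist as shown):
#
#   /               -> album_views.home
#   /show_album/3/  -> album_views.show_album(request, pk='3')
#   /register/      -> reg_views.regform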
| apache-2.0 | 6,114,460,916,937,103,000 | 38.65 | 79 | 0.68348 | false |
lepistone/odoo | addons/mrp/product.py | 23 | 3371 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_template(osv.osv):
_inherit = "product.template"
def _bom_orders_count(self, cr, uid, ids, field_name, arg, context=None):
Bom = self.pool('mrp.bom')
res = {}
for product_tmpl_id in ids:
nb = Bom.search_count(cr, uid, [('product_tmpl_id', '=', product_tmpl_id)], context=context)
res[product_tmpl_id] = {
'bom_count': nb,
}
return res
def _bom_orders_count_mo(self, cr, uid, ids, name, arg, context=None):
res = {}
for product_tmpl_id in self.browse(cr, uid, ids):
res[product_tmpl_id.id] = sum([p.mo_count for p in product_tmpl_id.product_variant_ids])
return res
_columns = {
"bom_ids": fields.one2many('mrp.bom', 'product_tmpl_id','Bill of Materials'),
'bom_count': fields.function(_bom_orders_count, string='# Bill of Material', type='integer', multi="_bom_order_count"),
'mo_count': fields.function(_bom_orders_count_mo, string='# Manufacturing Orders', type='integer'),
}
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'bom_ids': []
})
return super(product_template, self).copy(cr, uid, id, default, context=context)
class product_product(osv.osv):
_inherit = "product.product"
def _bom_orders_count(self, cr, uid, ids, field_name, arg, context=None):
Production = self.pool('mrp.production')
res = {}
for product_id in ids:
res[product_id] = Production.search_count(cr,uid, [('product_id', '=', product_id)], context=context)
return res
_columns = {
"produce_delay": fields.float('Manufacturing Lead Time', help="Average delay in days to produce this product. In the case of multi-level BOM, the manufacturing lead times of the components will be added."),
'track_production': fields.boolean('Track Manufacturing Lots', help="Forces to specify a Serial Number for all moves containing this product and generated by a Manufacturing Order"),
'mo_count': fields.function(_bom_orders_count, string='# Manufacturing Orders', type='integer'),
}
_defaults = {
"produce_delay": 1,
}
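    # Illustration of the computed counters above (return shapes follow the
    # functional-field definitions; the values are made up):
    #
    #     product_template._bom_orders_count(cr, uid, [42], ...)  # -> {42: {'bom_count': 3}}
    #     product_product._bom_orders_count(cr, uid, [7], ...)    # -> {7: 2}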
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 277,573,788,760,050,270 | 42.779221 | 214 | 0.611391 | false |