#!/usr/bin/python
import json
import time
import os
import logging
import boto3
import botocore
import re
from . import create_logger
from datetime import datetime, timedelta
from .utils import (
does_key_exist,
read_s3,
put_object_s3
)
from .vars import (
AWS_REGION,
AWS_REGION_NAMES
)
from .exceptions import (
PricingRetrievalException
)
logger = create_logger(__name__)
def get_cost(postrunjson, job_id):
job = postrunjson.Job
def reformat_time(t, delta):
d = datetime.strptime(t, '%Y%m%d-%H:%M:%S-UTC') + timedelta(days=delta)
return d.strftime("%Y-%m-%d")
start_time = reformat_time(job.start_time, -1) # give more room
    if(job.end_time is not None):
end_time = reformat_time(job.end_time, 1)
else:
end_time = datetime.utcnow() + timedelta(days=1) # give more room
end_time = end_time.strftime("%Y-%m-%d")
billing_args = {'Filter': {'Tags': {'Key': 'Name', 'Values': ['awsem-' + job_id]}},
'Granularity': 'DAILY',
'TimePeriod': {'Start': start_time,
'End': end_time},
'Metrics': ['BlendedCost'],
}
try:
billingres = boto3.client('ce').get_cost_and_usage(**billing_args)
except botocore.exceptions.ClientError as e:
logger.warning("%s. Please try to deploy the latest version of Tibanna." % e)
return 0.0
cost = sum([float(_['Total']['BlendedCost']['Amount']) for _ in billingres['ResultsByTime']])
return cost
def get_cost_estimate(postrunjson, ebs_root_type = "gp3", aws_price_overwrite = None):
"""
aws_price_overwrite can be used to overwrite the prices obtained from AWS (e.g. ec2 spot price).
This allows historical cost estimates. It is also used for testing. It is a dictionary with keys:
ec2_spot_price, ec2_ondemand_price, ebs_root_storage_price, ebs_storage_price,
ebs_iops_price (gp3, io1), ebs_io2_iops_prices, ebs_throughput_price
"""
cfg = postrunjson.config
job = postrunjson.Job
estimated_cost = 0.0
    if(job.end_time is None):
logger.warning("job.end_time not available. Cannot calculate estimated cost.")
return 0.0, "NA"
job_start = datetime.strptime(job.start_time, '%Y%m%d-%H:%M:%S-UTC')
job_end = datetime.strptime(job.end_time, '%Y%m%d-%H:%M:%S-UTC')
job_duration = (job_end - job_start).seconds / 3600.0 # in hours
try:
pricing_client = boto3.client('pricing', region_name=AWS_REGION)
# Get EC2 spot price
if(cfg.spot_instance):
if(cfg.spot_duration):
raise PricingRetrievalException("Pricing with spot_duration is not supported")
if(not job.instance_availablity_zone):
raise PricingRetrievalException("Instance availability zone is not available. You might have to deploy a newer version of Tibanna.")
ec2_client=boto3.client('ec2',region_name=AWS_REGION)
prices=ec2_client.describe_spot_price_history(
InstanceTypes=[cfg.instance_type],
ProductDescriptions=['Linux/UNIX'],
AvailabilityZone=job.instance_availablity_zone,
MaxResults=1) # Most recent price is on top
if(len(prices['SpotPriceHistory']) == 0):
raise PricingRetrievalException("Spot price could not be retrieved")
ec2_spot_price = (float)(prices['SpotPriceHistory'][0]['SpotPrice'])
if((aws_price_overwrite is not None) and 'ec2_spot_price' in aws_price_overwrite):
ec2_spot_price = aws_price_overwrite['ec2_spot_price']
estimated_cost = estimated_cost + ec2_spot_price * job_duration
else: # EC2 onDemand Prices
prices = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[
{
'Type': 'TERM_MATCH',
'Field': 'instanceType',
'Value': cfg.instance_type
},
{
'Type': 'TERM_MATCH',
'Field': 'operatingSystem',
'Value': 'Linux'
},
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': AWS_REGION_NAMES[AWS_REGION]
},
{
'Type': 'TERM_MATCH',
'Field': 'preInstalledSw',
'Value': 'NA'
},
{
'Type': 'TERM_MATCH',
'Field': 'capacitystatus',
'Value': 'used'
},
{
'Type': 'TERM_MATCH',
'Field': 'tenancy',
'Value': 'Shared'
},
])
price_list = prices["PriceList"]
if(not prices["PriceList"] or len(price_list) == 0):
raise PricingRetrievalException("We could not retrieve EC2 prices from Amazon")
if(len(price_list) > 1):
raise PricingRetrievalException("EC2 prices are ambiguous")
price_item = json.loads(price_list[0])
terms = price_item["terms"]
term = list(terms["OnDemand"].values())[0]
price_dimension = list(term["priceDimensions"].values())[0]
ec2_ondemand_price = (float)(price_dimension['pricePerUnit']["USD"])
if((aws_price_overwrite is not None) and 'ec2_ondemand_price' in aws_price_overwrite):
ec2_ondemand_price = aws_price_overwrite['ec2_ondemand_price']
estimated_cost = estimated_cost + ec2_ondemand_price * job_duration
# Get EBS pricing
prices = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': AWS_REGION_NAMES[AWS_REGION]
},
{
'Field': 'volumeApiName',
'Type': 'TERM_MATCH',
'Value': ebs_root_type,
},
{
'Field': 'productFamily',
'Type': 'TERM_MATCH',
'Value': 'Storage',
},
])
price_list = prices["PriceList"]
if(not prices["PriceList"] or len(price_list) == 0):
raise PricingRetrievalException("We could not retrieve EBS prices from Amazon")
if(len(price_list) > 1):
raise PricingRetrievalException("EBS prices are ambiguous")
price_item = json.loads(price_list[0])
terms = price_item["terms"]
term = list(terms["OnDemand"].values())[0]
price_dimension = list(term["priceDimensions"].values())[0]
ebs_root_storage_price = (float)(price_dimension['pricePerUnit']["USD"])
if((aws_price_overwrite is not None) and 'ebs_root_storage_price' in aws_price_overwrite):
ebs_root_storage_price = aws_price_overwrite['ebs_root_storage_price']
# add root EBS costs
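        # EBS storage prices returned by the Pricing API are quoted per GB-month; job_duration
        # is in hours, so dividing by 24.0*30.0 converts it into an approximate fraction of a month.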
root_ebs_cost = ebs_root_storage_price * cfg.root_ebs_size * job_duration / (24.0*30.0)
estimated_cost = estimated_cost + root_ebs_cost
# add additional EBS costs
if(cfg.ebs_type == "gp3"):
ebs_storage_cost = ebs_root_storage_price * cfg.ebs_size * job_duration / (24.0*30.0)
estimated_cost = estimated_cost + ebs_storage_cost
# Add throughput
if(cfg.ebs_throughput):
prices = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': AWS_REGION_NAMES[AWS_REGION]
},
{
'Field': 'volumeApiName',
'Type': 'TERM_MATCH',
'Value': cfg.ebs_type,
},
{
'Field': 'productFamily',
'Type': 'TERM_MATCH',
'Value': 'Provisioned Throughput',
},
])
price_list = prices["PriceList"]
if(not prices["PriceList"] or len(price_list) == 0):
raise PricingRetrievalException("We could not retrieve EBS throughput prices from Amazon")
if(len(price_list) > 1):
raise PricingRetrievalException("EBS throughput prices are ambiguous")
price_item = json.loads(price_list[0])
terms = price_item["terms"]
term = list(terms["OnDemand"].values())[0]
price_dimension = list(term["priceDimensions"].values())[0]
ebs_throughput_price = (float)(price_dimension['pricePerUnit']["USD"])/1000 # unit: mbps
if((aws_price_overwrite is not None) and 'ebs_throughput_price' in aws_price_overwrite):
ebs_throughput_price = aws_price_overwrite['ebs_throughput_price']
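                    # gp3 volumes include a 125 MB/s throughput baseline at no extra charge,
                    # so only throughput provisioned above that baseline is billed below.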
free_tier = 125
ebs_throughput_cost = ebs_throughput_price * max(cfg.ebs_throughput - free_tier, 0) * job_duration / (24.0*30.0)
estimated_cost = estimated_cost + ebs_throughput_cost
else:
prices = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': AWS_REGION_NAMES[AWS_REGION]
},
{
'Field': 'volumeApiName',
'Type': 'TERM_MATCH',
'Value': cfg.ebs_type,
},
{
'Field': 'productFamily',
'Type': 'TERM_MATCH',
'Value': 'Storage',
},
])
price_list = prices["PriceList"]
if(not prices["PriceList"] or len(price_list) == 0):
raise PricingRetrievalException("We could not retrieve EBS prices from Amazon")
if(len(price_list) > 1):
raise PricingRetrievalException("EBS prices are ambiguous")
price_item = json.loads(price_list[0])
terms = price_item["terms"]
term = list(terms["OnDemand"].values())[0]
price_dimension = list(term["priceDimensions"].values())[0]
ebs_storage_price = (float)(price_dimension['pricePerUnit']["USD"])
if((aws_price_overwrite is not None) and 'ebs_storage_price' in aws_price_overwrite):
ebs_storage_price = aws_price_overwrite['ebs_storage_price']
add_ebs_cost = ebs_storage_price * cfg.ebs_size * job_duration / (24.0*30.0)
estimated_cost = estimated_cost + add_ebs_cost
## IOPS PRICING
# Add IOPS prices for io1 or gp3
if( (cfg.ebs_type == "io1" or cfg.ebs_type == "gp3") and cfg.ebs_iops):
prices = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': AWS_REGION_NAMES[AWS_REGION]
},
{
'Field': 'volumeApiName',
'Type': 'TERM_MATCH',
'Value': cfg.ebs_type,
},
{
'Field': 'productFamily',
'Type': 'TERM_MATCH',
'Value': 'System Operation',
},
])
price_list = prices["PriceList"]
if(not prices["PriceList"] or len(price_list) == 0):
raise PricingRetrievalException("We could not retrieve EBS prices from Amazon")
if(len(price_list) > 1):
raise PricingRetrievalException("EBS prices are ambiguous")
price_item = json.loads(price_list[0])
terms = price_item["terms"]
term = list(terms["OnDemand"].values())[0]
price_dimension = list(term["priceDimensions"].values())[0]
ebs_iops_price = (float)(price_dimension['pricePerUnit']["USD"])
if((aws_price_overwrite is not None) and 'ebs_iops_price' in aws_price_overwrite):
ebs_iops_price = aws_price_overwrite['ebs_iops_price']
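            # gp3 volumes include 3,000 IOPS at no extra charge, so for gp3 only the IOPS
            # provisioned above that free tier are billed; io1 bills every provisioned IOPS.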
if cfg.ebs_type == "gp3":
free_tier = 3000
ebs_iops_cost = ebs_iops_price * max(cfg.ebs_iops - free_tier, 0) * job_duration / (24.0*30.0)
else:
ebs_iops_cost = ebs_iops_price * cfg.ebs_iops * job_duration / (24.0*30.0)
estimated_cost = estimated_cost + ebs_iops_cost
elif (cfg.ebs_type == "io2" and cfg.ebs_iops):
prices = pricing_client.get_products(ServiceCode='AmazonEC2', Filters=[
{
'Type': 'TERM_MATCH',
'Field': 'location',
'Value': AWS_REGION_NAMES[AWS_REGION]
},
{
'Field': 'volumeApiName',
'Type': 'TERM_MATCH',
'Value': cfg.ebs_type,
},
{
'Field': 'productFamily',
'Type': 'TERM_MATCH',
'Value': 'System Operation',
},
])
price_list = prices["PriceList"]
if(len(price_list) != 3):
raise PricingRetrievalException("EBS prices for io2 are incomplete")
ebs_io2_iops_prices = []
for price_entry in price_list:
price_item = json.loads(price_entry)
terms = price_item["terms"]
term = list(terms["OnDemand"].values())[0]
price_dimension = list(term["priceDimensions"].values())[0]
ebs_iops_price = (float)(price_dimension['pricePerUnit']["USD"])
ebs_io2_iops_prices.append(ebs_iops_price)
ebs_io2_iops_prices.sort(reverse=True)
if((aws_price_overwrite is not None) and 'ebs_io2_iops_prices' in aws_price_overwrite):
ebs_io2_iops_prices = aws_price_overwrite['ebs_io2_iops_prices']
# Pricing tiers are currently hardcoded. There wasn't a simple way to extract them from the pricing information
tier0 = 32000
tier1 = 64000
ebs_iops_cost = (
ebs_io2_iops_prices[0] * min(cfg.ebs_iops, tier0) + # Portion below 32000 IOPS
ebs_io2_iops_prices[1] * min(max(cfg.ebs_iops - tier0, 0), tier1 - tier0) + # Portion between 32001 and 64000 IOPS
ebs_io2_iops_prices[2] * max(cfg.ebs_iops - tier1, 0) # Portion above 64000 IOPS
) * job_duration / (24.0*30.0)
estimated_cost = estimated_cost + ebs_iops_cost
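            # Worked example (illustrative numbers only): with 40,000 provisioned io2 IOPS the
            # monthly IOPS charge is ebs_io2_iops_prices[0]*32000 + ebs_io2_iops_prices[1]*8000,
            # which is then scaled by job_duration/(24*30) just like the storage charges above.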
time_since_run = (datetime.utcnow() - job_end).total_seconds() / (3600 * 24) # days
estimation_type = "retrospective estimate" if time_since_run > 10 else "immediate estimate"
return estimated_cost, estimation_type
except botocore.exceptions.ClientError as e:
logger.warning("Cost estimation error: %s. Please try to deploy the latest version of Tibanna." % e)
return 0.0, "NA"
except PricingRetrievalException as e:
logger.warning("Cost estimation error: %s" % e)
return 0.0, "NA"
except Exception as e:
logger.warning("Cost estimation error: %s" % e)
return 0.0, "NA"
def get_cost_estimate_from_tsv(log_bucket, job_id):
s3_key = os.path.join(job_id + '.metrics/', 'metrics_report.tsv')
cost_estimate = 0.0
cost_estimate_type = "NA"
    if not does_key_exist(log_bucket, s3_key):
return cost_estimate, cost_estimate_type
try:
read_file = read_s3(log_bucket, s3_key)
for row in read_file.splitlines():
line = row.split("\t")
if(line[0] == "Estimated_Cost"):
cost_estimate = float(line[1])
if(line[0] == "Estimated_Cost_Type"):
cost_estimate_type = line[1]
except Exception as e:
logger.warning("Could not get cost estimate from tsv: %s" % e)
pass
return cost_estimate, cost_estimate_type
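# Based on the parsing above, metrics_report.tsv is a simple two-column, tab-separated
# key/value file; the rows relevant to this module might look like this (tab-separated,
# illustrative values only):
#
#   Estimated_Cost        0.42
#   Estimated_Cost_Type   immediate estimate
#   Cost                  0.45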
def update_cost_estimate_in_tsv(log_bucket, job_id, cost_estimate, cost_estimate_type, encryption=False, kms_key_id=None):
s3_key = os.path.join(job_id + '.metrics/', 'metrics_report.tsv')
    if not does_key_exist(log_bucket, s3_key):
return
# reading from metrics_report.tsv
read_file = read_s3(log_bucket, s3_key)
# get the current estimate type in the file
for row in read_file.splitlines():
line = row.split("\t")
if(line[0] == "Estimated_Cost_Type"):
current_cost_estimate_type = line[1]
if(cost_estimate_type=="retrospective estimate" and (current_cost_estimate_type=="immediate estimate" or current_cost_estimate_type=="actual cost") ):
logger.warning("There already is a probably more accurate estimate in the tsv. Not updating.")
return
write_file = ""
for row in read_file.splitlines():
# Remove Estimated_Cost and Estimated_Cost_Type from file, since we want to update it
if("Estimated_Cost" in row.split("\t") or "Estimated_Cost_Type" in row.split("\t")):
continue
if("Cost" in row.split("\t") and cost_estimate_type=="actual cost"):
continue
write_file = write_file + row + '\n'
if(cost_estimate_type=="actual cost"):
write_file = write_file + 'Cost\t' + str(cost_estimate) + '\n'
write_file = write_file + 'Estimated_Cost\t' + str(cost_estimate) + '\n'
write_file = write_file + 'Estimated_Cost_Type\t' + cost_estimate_type + '\n'
put_object_s3(content=write_file, key=s3_key, bucket=log_bucket,
encrypt_s3_upload=encryption, kms_key_id=kms_key_id)
def update_cost_in_tsv(log_bucket, job_id, cost,
encryption=False, kms_key_id=None):
s3_key = os.path.join(job_id + '.metrics/', 'metrics_report.tsv')
    if not does_key_exist(log_bucket, s3_key):
return
# reading from metrics_report.tsv
read_file = read_s3(log_bucket, s3_key)
write_file = ""
for row in read_file.splitlines():
# Remove Cost from file, since we want to update it
if("Cost" not in row.split("\t")):
write_file = write_file + row + '\n'
write_file = write_file + 'Cost\t' + str(cost) + '\n'
put_object_s3(content=write_file, key=s3_key, bucket=log_bucket,
encrypt_s3_upload=encryption, kms_key_id=kms_key_id)
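# Hedged usage sketch (not part of this module): after a job finishes, the helpers above are
# typically combined along these lines, assuming a parsed postrunjson object plus the job's
# ID and log bucket are available to the caller:
#
#   estimate, estimate_type = get_cost_estimate(postrunjson)
#   update_cost_estimate_in_tsv(log_bucket, job_id, estimate, estimate_type)
#   cost = get_cost(postrunjson, job_id)  # once Cost Explorer data becomes available
#   update_cost_in_tsv(log_bucket, job_id, cost)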
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client and server implementations of HTTP/1.x.
.. versionadded:: 4.0
"""
from __future__ import absolute_import, division, print_function
import re
from tornado.concurrent import (Future, future_add_done_callback,
future_set_result_unless_cancelled)
from tornado.escape import native_str, utf8
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log, app_log
from tornado import stack_context
from tornado.util import GzipDecompressor, PY3
class _QuietException(Exception):
def __init__(self):
pass
class _ExceptionLoggingContext(object):
"""Used with the ``with`` statement when calling delegate methods to
log any exceptions with the given logger. Any exceptions caught are
converted to _QuietException
"""
def __init__(self, logger):
self.logger = logger
def __enter__(self):
pass
def __exit__(self, typ, value, tb):
if value is not None:
self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
raise _QuietException
class HTTP1ConnectionParameters(object):
"""Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
"""
def __init__(self, no_keep_alive=False, chunk_size=None,
max_header_size=None, header_timeout=None, max_body_size=None,
body_timeout=None, decompress=False):
"""
:arg bool no_keep_alive: If true, always close the connection after
one request.
:arg int chunk_size: how much data to read into memory at once
:arg int max_header_size: maximum amount of data for HTTP headers
:arg float header_timeout: how long to wait for all headers (seconds)
:arg int max_body_size: maximum amount of data for body
:arg float body_timeout: how long to wait while reading body (seconds)
:arg bool decompress: if true, decode incoming
``Content-Encoding: gzip``
"""
self.no_keep_alive = no_keep_alive
self.chunk_size = chunk_size or 65536
self.max_header_size = max_header_size or 65536
self.header_timeout = header_timeout
self.max_body_size = max_body_size
self.body_timeout = body_timeout
self.decompress = decompress
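    # For example (illustrative values, not from Tornado's docs), a server that should
    # decompress gzipped request bodies and cap bodies at 1 MB could use:
    #     params = HTTP1ConnectionParameters(decompress=True, max_body_size=1024 * 1024)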
class HTTP1Connection(httputil.HTTPConnection):
"""Implements the HTTP/1.x protocol.
    This class can be used on its own for clients, or via `HTTP1ServerConnection`
for servers.
"""
def __init__(self, stream, is_client, params=None, context=None):
"""
:arg stream: an `.IOStream`
:arg bool is_client: client or server
:arg params: a `.HTTP1ConnectionParameters` instance or ``None``
:arg context: an opaque application-defined object that can be accessed
as ``connection.context``.
"""
self.is_client = is_client
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self.no_keep_alive = params.no_keep_alive
# The body limits can be altered by the delegate, so save them
# here instead of just referencing self.params later.
self._max_body_size = (self.params.max_body_size or
self.stream.max_buffer_size)
self._body_timeout = self.params.body_timeout
# _write_finished is set to True when finish() has been called,
# i.e. there will be no more data sent. Data may still be in the
# stream's write buffer.
self._write_finished = False
# True when we have read the entire incoming body.
self._read_finished = False
# _finish_future resolves when all data has been written and flushed
# to the IOStream.
self._finish_future = Future()
# If true, the connection should be closed after this request
# (after the response has been written in the server side,
# and after it has been read in the client)
self._disconnect_on_finish = False
self._clear_callbacks()
# Save the start lines after we read or write them; they
# affect later processing (e.g. 304 responses and HEAD methods
# have content-length but no bodies)
self._request_start_line = None
self._response_start_line = None
self._request_headers = None
# True if we are writing output with chunked encoding.
self._chunking_output = None
# While reading a body with a content-length, this is the
# amount left to read.
self._expected_content_remaining = None
# A Future for our outgoing writes, returned by IOStream.write.
self._pending_write = None
def read_response(self, delegate):
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to None after the full response has
been read.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate)
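    # Hedged sketch of the client-mode flow described in the docstring above (start_line,
    # headers, body and delegate are assumed to be prepared by the caller):
    #
    #     conn.write_headers(start_line, headers)  # request line + headers
    #     conn.write(body)                         # optional request body
    #     conn.finish()                            # no more outgoing data
    #     response_future = conn.read_response(delegate)  # resolves once the response is read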
@gen.coroutine
def _read_message(self, delegate):
need_delegate_close = False
try:
header_future = self.stream.read_until_regex(
b"\r?\n\r?\n",
max_bytes=self.params.max_header_size)
if self.params.header_timeout is None:
header_data = yield header_future
else:
try:
header_data = yield gen.with_timeout(
self.stream.io_loop.time() + self.params.header_timeout,
header_future,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
self.close()
raise gen.Return(False)
start_line, headers = self._parse_headers(header_data)
if self.is_client:
start_line = httputil.parse_response_start_line(start_line)
self._response_start_line = start_line
else:
start_line = httputil.parse_request_start_line(start_line)
self._request_start_line = start_line
self._request_headers = headers
self._disconnect_on_finish = not self._can_keep_alive(
start_line, headers)
need_delegate_close = True
with _ExceptionLoggingContext(app_log):
header_future = delegate.headers_received(start_line, headers)
if header_future is not None:
yield header_future
if self.stream is None:
# We've been detached.
need_delegate_close = False
raise gen.Return(False)
skip_body = False
if self.is_client:
if (self._request_start_line is not None and
self._request_start_line.method == 'HEAD'):
skip_body = True
code = start_line.code
if code == 304:
# 304 responses may include the content-length header
# but do not actually have a body.
# http://tools.ietf.org/html/rfc7230#section-3.3
skip_body = True
if code >= 100 and code < 200:
# 1xx responses should never indicate the presence of
# a body.
if ('Content-Length' in headers or
'Transfer-Encoding' in headers):
raise httputil.HTTPInputError(
"Response code %d cannot have body" % code)
# TODO: client delegates will get headers_received twice
# in the case of a 100-continue. Document or change?
yield self._read_message(delegate)
else:
if (headers.get("Expect") == "100-continue" and
not self._write_finished):
self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
if not skip_body:
body_future = self._read_body(
start_line.code if self.is_client else 0, headers, delegate)
if body_future is not None:
if self._body_timeout is None:
yield body_future
else:
try:
yield gen.with_timeout(
self.stream.io_loop.time() + self._body_timeout,
body_future,
quiet_exceptions=iostream.StreamClosedError)
except gen.TimeoutError:
gen_log.info("Timeout reading body from %s",
self.context)
self.stream.close()
raise gen.Return(False)
self._read_finished = True
if not self._write_finished or self.is_client:
need_delegate_close = False
with _ExceptionLoggingContext(app_log):
delegate.finish()
# If we're waiting for the application to produce an asynchronous
# response, and we're not detached, register a close callback
# on the stream (we didn't need one while we were reading)
if (not self._finish_future.done() and
self.stream is not None and
not self.stream.closed()):
self.stream.set_close_callback(self._on_connection_close)
yield self._finish_future
if self.is_client and self._disconnect_on_finish:
self.close()
if self.stream is None:
raise gen.Return(False)
except httputil.HTTPInputError as e:
gen_log.info("Malformed HTTP message from %s: %s",
self.context, e)
if not self.is_client:
yield self.stream.write(b'HTTP/1.1 400 Bad Request\r\n\r\n')
self.close()
raise gen.Return(False)
finally:
if need_delegate_close:
with _ExceptionLoggingContext(app_log):
delegate.on_connection_close()
header_future = None
self._clear_callbacks()
raise gen.Return(True)
def _clear_callbacks(self):
"""Clears the callback attributes.
This allows the request handler to be garbage collected more
quickly in CPython by breaking up reference cycles.
"""
self._write_callback = None
self._write_future = None
self._close_callback = None
if self.stream is not None:
self.stream.set_close_callback(None)
def set_close_callback(self, callback):
"""Sets a callback that will be run when the connection is closed.
.. deprecated:: 4.0
Use `.HTTPMessageDelegate.on_connection_close` instead.
"""
self._close_callback = stack_context.wrap(callback)
def _on_connection_close(self):
# Note that this callback is only registered on the IOStream
# when we have finished reading the request and are waiting for
# the application to produce its response.
if self._close_callback is not None:
callback = self._close_callback
self._close_callback = None
callback()
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
self._clear_callbacks()
def close(self):
if self.stream is not None:
self.stream.close()
self._clear_callbacks()
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
def detach(self):
"""Take control of the underlying stream.
Returns the underlying `.IOStream` object and stops all further
HTTP processing. May only be called during
`.HTTPMessageDelegate.headers_received`. Intended for implementing
protocols like websockets that tunnel over an HTTP handshake.
"""
self._clear_callbacks()
stream = self.stream
self.stream = None
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
return stream
def set_body_timeout(self, timeout):
"""Sets the body timeout for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._body_timeout = timeout
def set_max_body_size(self, max_body_size):
"""Sets the body size limit for a single request.
Overrides the value from `.HTTP1ConnectionParameters`.
"""
self._max_body_size = max_body_size
def write_headers(self, start_line, headers, chunk=None, callback=None):
"""Implements `.HTTPConnection.write_headers`."""
lines = []
if self.is_client:
self._request_start_line = start_line
lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
# Client requests with a non-empty body must have either a
# Content-Length or a Transfer-Encoding.
self._chunking_output = (
start_line.method in ('POST', 'PUT', 'PATCH') and
'Content-Length' not in headers and
'Transfer-Encoding' not in headers)
else:
self._response_start_line = start_line
lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
self._chunking_output = (
# TODO: should this use
# self._request_start_line.version or
# start_line.version?
self._request_start_line.version == 'HTTP/1.1' and
# 1xx, 204 and 304 responses have no body (not even a zero-length
# body), and so should not have either Content-Length or
# Transfer-Encoding headers.
start_line.code not in (204, 304) and
(start_line.code < 100 or start_line.code >= 200) and
# No need to chunk the output if a Content-Length is specified.
'Content-Length' not in headers and
# Applications are discouraged from touching Transfer-Encoding,
# but if they do, leave it alone.
'Transfer-Encoding' not in headers)
# If connection to a 1.1 client will be closed, inform client
if (self._request_start_line.version == 'HTTP/1.1' and self._disconnect_on_finish):
headers['Connection'] = 'close'
# If a 1.0 client asked for keep-alive, add the header.
if (self._request_start_line.version == 'HTTP/1.0' and
self._request_headers.get('Connection', '').lower() == 'keep-alive'):
headers['Connection'] = 'Keep-Alive'
if self._chunking_output:
headers['Transfer-Encoding'] = 'chunked'
if (not self.is_client and
(self._request_start_line.method == 'HEAD' or
start_line.code == 304)):
self._expected_content_remaining = 0
elif 'Content-Length' in headers:
self._expected_content_remaining = int(headers['Content-Length'])
else:
self._expected_content_remaining = None
# TODO: headers are supposed to be of type str, but we still have some
# cases that let bytes slip through. Remove these native_str calls when those
# are fixed.
header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all())
if PY3:
lines.extend(l.encode('latin1') for l in header_lines)
else:
lines.extend(header_lines)
for line in lines:
if b'\n' in line:
raise ValueError('Newline in header: ' + repr(line))
future = None
if self.stream.closed():
future = self._write_future = Future()
future.set_exception(iostream.StreamClosedError())
future.exception()
else:
if callback is not None:
self._write_callback = stack_context.wrap(callback)
else:
future = self._write_future = Future()
data = b"\r\n".join(lines) + b"\r\n\r\n"
if chunk:
data += self._format_chunk(chunk)
self._pending_write = self.stream.write(data)
self._pending_write.add_done_callback(self._on_write_complete)
return future
def _format_chunk(self, chunk):
if self._expected_content_remaining is not None:
self._expected_content_remaining -= len(chunk)
if self._expected_content_remaining < 0:
# Close the stream now to stop further framing errors.
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write more data than Content-Length")
if self._chunking_output and chunk:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
else:
return chunk
def write(self, chunk, callback=None):
"""Implements `.HTTPConnection.write`.
For backwards compatibility it is allowed but deprecated to
skip `write_headers` and instead call `write()` with a
pre-encoded header block.
"""
future = None
if self.stream.closed():
future = self._write_future = Future()
self._write_future.set_exception(iostream.StreamClosedError())
self._write_future.exception()
else:
if callback is not None:
self._write_callback = stack_context.wrap(callback)
else:
future = self._write_future = Future()
self._pending_write = self.stream.write(self._format_chunk(chunk))
self._pending_write.add_done_callback(self._on_write_complete)
return future
def finish(self):
"""Implements `.HTTPConnection.finish`."""
if (self._expected_content_remaining is not None and
self._expected_content_remaining != 0 and
not self.stream.closed()):
self.stream.close()
raise httputil.HTTPOutputError(
"Tried to write %d bytes less than Content-Length" %
self._expected_content_remaining)
if self._chunking_output:
if not self.stream.closed():
self._pending_write = self.stream.write(b"0\r\n\r\n")
self._pending_write.add_done_callback(self._on_write_complete)
self._write_finished = True
# If the app finished the request while we're still reading,
# divert any remaining data away from the delegate and
# close the connection when we're done sending our response.
# Closing the connection is the only way to avoid reading the
# whole input body.
if not self._read_finished:
self._disconnect_on_finish = True
# No more data is coming, so instruct TCP to send any remaining
# data immediately instead of waiting for a full packet or ack.
self.stream.set_nodelay(True)
if self._pending_write is None:
self._finish_request(None)
else:
future_add_done_callback(self._pending_write, self._finish_request)
def _on_write_complete(self, future):
exc = future.exception()
if exc is not None and not isinstance(exc, iostream.StreamClosedError):
future.result()
if self._write_callback is not None:
callback = self._write_callback
self._write_callback = None
self.stream.io_loop.add_callback(callback)
if self._write_future is not None:
future = self._write_future
self._write_future = None
future_set_result_unless_cancelled(future, None)
def _can_keep_alive(self, start_line, headers):
if self.params.no_keep_alive:
return False
connection_header = headers.get("Connection")
if connection_header is not None:
connection_header = connection_header.lower()
if start_line.version == "HTTP/1.1":
return connection_header != "close"
elif ("Content-Length" in headers or
headers.get("Transfer-Encoding", "").lower() == "chunked" or
getattr(start_line, 'method', None) in ("HEAD", "GET")):
# start_line may be a request or response start line; only
# the former has a method attribute.
return connection_header == "keep-alive"
return False
def _finish_request(self, future):
self._clear_callbacks()
if not self.is_client and self._disconnect_on_finish:
self.close()
return
# Turn Nagle's algorithm back on, leaving the stream in its
# default state for the next request.
self.stream.set_nodelay(False)
if not self._finish_future.done():
future_set_result_unless_cancelled(self._finish_future, None)
def _parse_headers(self, data):
# The lstrip removes newlines that some implementations sometimes
# insert between messages of a reused connection. Per RFC 7230,
# we SHOULD ignore at least one empty line before the request.
# http://tools.ietf.org/html/rfc7230#section-3.5
data = native_str(data.decode('latin1')).lstrip("\r\n")
        # RFC 7230 allows for both CRLF and bare LF as line terminators.
eol = data.find("\n")
start_line = data[:eol].rstrip("\r")
try:
headers = httputil.HTTPHeaders.parse(data[eol:])
except ValueError:
            # probably from split() if there was no ':' in the line
raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
data[eol:100])
return start_line, headers
def _read_body(self, code, headers, delegate):
if "Content-Length" in headers:
if "Transfer-Encoding" in headers:
# Response cannot contain both Content-Length and
# Transfer-Encoding headers.
# http://tools.ietf.org/html/rfc7230#section-3.3.3
raise httputil.HTTPInputError(
"Response with both Transfer-Encoding and Content-Length")
if "," in headers["Content-Length"]:
# Proxies sometimes cause Content-Length headers to get
# duplicated. If all the values are identical then we can
# use them but if they differ it's an error.
pieces = re.split(r',\s*', headers["Content-Length"])
if any(i != pieces[0] for i in pieces):
raise httputil.HTTPInputError(
"Multiple unequal Content-Lengths: %r" %
headers["Content-Length"])
headers["Content-Length"] = pieces[0]
try:
content_length = int(headers["Content-Length"])
except ValueError:
# Handles non-integer Content-Length value.
raise httputil.HTTPInputError(
"Only integer Content-Length is allowed: %s" % headers["Content-Length"])
if content_length > self._max_body_size:
raise httputil.HTTPInputError("Content-Length too long")
else:
content_length = None
if code == 204:
# This response code is not allowed to have a non-empty body,
# and has an implicit length of zero instead of read-until-close.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
if ("Transfer-Encoding" in headers or
content_length not in (None, 0)):
raise httputil.HTTPInputError(
"Response with code %d should not have body" % code)
content_length = 0
if content_length is not None:
return self._read_fixed_body(content_length, delegate)
if headers.get("Transfer-Encoding", "").lower() == "chunked":
return self._read_chunked_body(delegate)
if self.is_client:
return self._read_body_until_close(delegate)
return None
@gen.coroutine
def _read_fixed_body(self, content_length, delegate):
while content_length > 0:
body = yield self.stream.read_bytes(
min(self.params.chunk_size, content_length), partial=True)
content_length -= len(body)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(body)
if ret is not None:
yield ret
@gen.coroutine
def _read_chunked_body(self, delegate):
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
total_size = 0
while True:
chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
chunk_len = int(chunk_len.strip(), 16)
if chunk_len == 0:
crlf = yield self.stream.read_bytes(2)
if crlf != b'\r\n':
raise httputil.HTTPInputError("improperly terminated chunked request")
return
total_size += chunk_len
if total_size > self._max_body_size:
raise httputil.HTTPInputError("chunked body too large")
bytes_to_read = chunk_len
while bytes_to_read:
chunk = yield self.stream.read_bytes(
min(bytes_to_read, self.params.chunk_size), partial=True)
bytes_to_read -= len(chunk)
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
ret = delegate.data_received(chunk)
if ret is not None:
yield ret
# chunk ends with \r\n
crlf = yield self.stream.read_bytes(2)
assert crlf == b"\r\n"
@gen.coroutine
def _read_body_until_close(self, delegate):
body = yield self.stream.read_until_close()
if not self._write_finished or self.is_client:
with _ExceptionLoggingContext(app_log):
delegate.data_received(body)
class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
"""Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
"""
def __init__(self, delegate, chunk_size):
self._delegate = delegate
self._chunk_size = chunk_size
self._decompressor = None
def headers_received(self, start_line, headers):
if headers.get("Content-Encoding") == "gzip":
self._decompressor = GzipDecompressor()
# Downstream delegates will only see uncompressed data,
# so rename the content-encoding header.
# (but note that curl_httpclient doesn't do this).
headers.add("X-Consumed-Content-Encoding",
headers["Content-Encoding"])
del headers["Content-Encoding"]
return self._delegate.headers_received(start_line, headers)
@gen.coroutine
def data_received(self, chunk):
if self._decompressor:
compressed_data = chunk
while compressed_data:
decompressed = self._decompressor.decompress(
compressed_data, self._chunk_size)
if decompressed:
ret = self._delegate.data_received(decompressed)
if ret is not None:
yield ret
compressed_data = self._decompressor.unconsumed_tail
else:
ret = self._delegate.data_received(chunk)
if ret is not None:
yield ret
def finish(self):
if self._decompressor is not None:
tail = self._decompressor.flush()
if tail:
# I believe the tail will always be empty (i.e.
# decompress will return all it can). The purpose
# of the flush call is to detect errors such
# as truncated input. But in case it ever returns
# anything, treat it as an extra chunk
self._delegate.data_received(tail)
return self._delegate.finish()
def on_connection_close(self):
return self._delegate.on_connection_close()
class HTTP1ServerConnection(object):
"""An HTTP/1.x server."""
def __init__(self, stream, params=None, context=None):
"""
:arg stream: an `.IOStream`
:arg params: a `.HTTP1ConnectionParameters` or None
:arg context: an opaque application-defined object that is accessible
as ``connection.context``
"""
self.stream = stream
if params is None:
params = HTTP1ConnectionParameters()
self.params = params
self.context = context
self._serving_future = None
@gen.coroutine
def close(self):
"""Closes the connection.
Returns a `.Future` that resolves after the serving loop has exited.
"""
self.stream.close()
# Block until the serving loop is done, but ignore any exceptions
# (start_serving is already responsible for logging them).
try:
yield self._serving_future
except Exception:
pass
def start_serving(self, delegate):
"""Starts serving requests on this connection.
:arg delegate: a `.HTTPServerConnectionDelegate`
"""
assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
self._serving_future = self._server_request_loop(delegate)
# Register the future on the IOLoop so its errors get logged.
self.stream.io_loop.add_future(self._serving_future,
lambda f: f.result())
@gen.coroutine
def _server_request_loop(self, delegate):
try:
while True:
conn = HTTP1Connection(self.stream, False,
self.params, self.context)
request_delegate = delegate.start_request(self, conn)
try:
ret = yield conn.read_response(request_delegate)
except (iostream.StreamClosedError,
iostream.UnsatisfiableReadError):
return
except _QuietException:
# This exception was already logged.
conn.close()
return
except Exception:
gen_log.error("Uncaught exception", exc_info=True)
conn.close()
return
if not ret:
return
yield gen.moment
finally:
delegate.on_close(self)
from django.db import models
from django.contrib.auth.models import User
from jsonfield import JSONField
from django.utils import timezone
class Language(models.Model):
def __unicode__(self): # __unicode__ on Python 2
return self.name
TARGET = (
('Test', 'Testsite'),
('Live', 'Livesite'),
('Rockstar', 'Rockstar'),
)
enabled = models.BooleanField(default=False)
code = models.CharField(max_length=5)
name = models.CharField(max_length=32)
ename = models.CharField(max_length=32)
master = models.CharField(max_length=32)
target = models.CharField(max_length=8, choices=TARGET)
cID = models.IntegerField()
data = {} #only used for statistical data storage in views
# Create your models here.
class LanguageStatistic(models.Model):
def __unicode__(self): # __unicode__ on Python 2
return "{} / {} / {} / {} / {} / {} / {} / {} / {} / {}".format(self.date,self.lang,self.type,self.target, self.total, self.count, self.speed, self.totalSecs, self.countSecs, self.speedSecs)
TYPE = (
('C', 'Crowdin'),
('D', 'Dubbed'),
('S', 'Subtitled'),
)
TARGET = (
('T', 'Test'),
('L', 'Live'),
('R', 'Rockstar'),
)
lang = models.CharField(max_length=5)
date = models.DateField('date')
type = models.CharField(max_length=1, choices=TYPE)
target = models.CharField(max_length=1, choices=TARGET)
total = models.IntegerField(default=0)
count = models.IntegerField(default=0)
speed = models.IntegerField(default=0)
totalSecs = models.IntegerField(default=0) #Seconds or Words
countSecs = models.IntegerField(default=0) #Seconds or Words
speedSecs = models.IntegerField(default=0) #Speed in Seconds or Words / last 30 days
totalStrings = models.IntegerField(default=0) #Strings
countStrings = models.IntegerField(default=0) #Strings
speedStrings = models.IntegerField(default=0) #Speed in Strings / last 30 days
def calculateSpeed(self):
import datetime
#from datetime import datetime, timedelta
#print(self.date)
monthAgo = self.date - datetime.timedelta(days=30)
#print(monthAgo)
try:
oldStat = LanguageStatistic.objects.get(lang=self.lang,type=self.type,date=monthAgo)
self.speed = self.count - oldStat.count
self.speedSecs = self.countSecs - oldStat.countSecs
self.speedStrings = self.countStrings - oldStat.countStrings
except LanguageStatistic.DoesNotExist:
self.speed = 0
self.speedSecs = 0
self.speedStrings = 0
def getLeft(self):
return self.total-self.count
def getLeftString(self):
return self.totalStrings - self.countStrings
def getPercent(self):
if (self.totalSecs > 0):
return self.countSecs/self.totalSecs
else:
return 1
class Subtitle(models.Model):
def __unicode__(self): # __unicode__ on Python 2
return u"{}:{}/{}/{}/{}/{}".format(self.title,self.amaraID,self.lang,self.percentDone,self.lines, self.author)
    created = models.DateTimeField(default=timezone.now)  # pass the callable so the default is evaluated at save time, not at import time
amaraID = models.CharField(db_index=True,max_length=16)
lang = models.CharField(db_index=True,max_length=5) # can be de_CH
title = models.CharField(max_length=256)
author = models.CharField(max_length=32)
origLines = models.IntegerField()
lines = models.IntegerField()
completion = models.BooleanField(default=False)
percentDone = models.FloatField()
data = JSONField()
infoData = JSONField()
def isComplete(self):
""" Considers a subtitle as complete if amara has subtitles_complete and subtitles_count > 0.75 of english """
if (self.completion and self.percentDone > 0.75):
return True
else:
return False
def hasSubtitle(self):
return self.isComplete()
def Count(self):
return self.lines
def OrigCount(self):
return self.origLines
DEFAULT_TRANSLATOR_ID = 1
class Video(models.Model):
def __str__(self): # __unicode__ on Python 2
return self.TITLE
# Check if the video has been dubbed for language lang
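    # (Language.master is assumed to name one of the per-language columns below, e.g.
    # 'DEUTSCH', so getattr(self, lang.master) yields that language's dubbed YouTube ID, if any.)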
def isDubbed(self,lang):
if ( len(getattr(self,lang.master)) > 0 ):
return True
else:
return False
def loadSubtitle(self,lang):
try:
self.subtitle = Subtitle.objects.get(amaraID=self.AMARA_ID,lang=lang.code)
except Subtitle.DoesNotExist:
self.subtitle = Subtitle(origLines=0,lines=0,percentDone=0)
def getTranslatedTitle(self):
title = self.subtitle.title
        return title if (title != "") else self.TITLE
def subtitleComplete(self):
return self.subtitle.isComplete()
def subtitleCount(self):
return self.subtitle.OrigCount()
def subtitleTranslatedCount(self):
return self.subtitle.Count()
def subtitlePercentDone(self):
return self.subtitle.percentDone
def getYoutubeID(self,lang):
return getattr(self,lang.master)
def getAuthor(self,lang):
if(lang.code == "de"):
if (str(self.deTranslator) == 'admin'):
if(self.isDubbed(lang)):
return 'unknown'
else:
return self.subtitle.author
else:
return self.deTranslator
else:
if (self.isDubbed(lang)):
return "unknown"
else:
return self.subtitle.author
subtitle = {}
deTranslator = models.ForeignKey(User,default=DEFAULT_TRANSLATOR_ID)
amaraOK = models.BooleanField(default=False)
showsExercise = models.BooleanField(default=False)
SERIAL = models.IntegerField()
DATE_ADDED = models.DateField('Updated')
DATE_CREATED = models.DateField('Date')
TITLE = models.CharField(max_length=128)
LICENSE = models.CharField(max_length=32)
DOMAIN = models.CharField(max_length=32)
SUBJECT = models.CharField(max_length=64)
TOPIC = models.CharField(max_length=256)
TUTORIAL = models.CharField(max_length=256)
TITLE_ID = models.CharField(max_length=256)
DURATION = models.IntegerField()
URL = models.CharField(max_length=256)
AMARA_ID = models.CharField(db_index=True,max_length=16,blank=True)
REQUIRED_FOR = models.CharField(db_index=True,max_length=32)
TRANSCRIPT = models.CharField(max_length=1)
ENGLISH = models.CharField(max_length=16,blank=True)
ARABIC = models.CharField(max_length=16,blank=True)
ARMENIAN = models.CharField(max_length=16,blank=True)
BAHASA_INDONESIA = models.CharField(max_length=16,blank=True)
BANGLA = models.CharField(max_length=16,blank=True)
BULGARIAN = models.CharField(max_length=16,blank=True)
CHINESE = models.CharField(max_length=16,blank=True)
CZECH = models.CharField(max_length=16,blank=True)
DANISH = models.CharField(max_length=16,blank=True)
DARI = models.CharField(max_length=16,blank=True,default='')
DEUTSCH = models.CharField(max_length=16,blank=True)
ESPANOL = models.CharField(max_length=16,blank=True)
FARSI = models.CharField(max_length=16,blank=True)
FRANCAIS = models.CharField(max_length=16,blank=True)
GREEK = models.CharField(max_length=16,blank=True)
HEBREW = models.CharField(max_length=16,blank=True)
ITALIANO = models.CharField(max_length=16,blank=True)
JAPANESE = models.CharField(max_length=16,blank=True)
KISWAHILI = models.CharField(max_length=16,blank=True)
KOREAN = models.CharField(max_length=16,blank=True)
MONGOLIAN = models.CharField(max_length=16,blank=True)
NEDERLANDS = models.CharField(max_length=16,blank=True)
NEPALI = models.CharField(max_length=16,blank=True)
NORSK = models.CharField(max_length=16,blank=True)
POLISH = models.CharField(max_length=16,blank=True)
PORTUGUES = models.CharField(max_length=16,blank=True)
PORTUGAL_PORTUGUES = models.CharField(max_length=16,blank=True)
PUNJABI = models.CharField(max_length=16,blank=True)
RUSSIAN = models.CharField(max_length=16,blank=True)
SERBIAN = models.CharField(max_length=16,blank=True)
SINDHI = models.CharField(max_length=16,blank=True)
SINHALA = models.CharField(max_length=16,blank=True)
TAMIL = models.CharField(max_length=16,blank=True)
TELUGU = models.CharField(max_length=16,blank=True)
THAI = models.CharField(max_length=16,blank=True)
TURKCE = models.CharField(max_length=16,blank=True)
UKRAINIAN = models.CharField(max_length=16,blank=True)
URDU = models.CharField(max_length=16,blank=True)
XHOSA = models.CharField(max_length=16,blank=True)
ZULU = models.CharField(max_length=64,blank=True)
import sys
import unittest
import tkinter
from tkinter import ttk
from test.support import requires, run_unittest, swap_attr
from tkinter.test.support import AbstractTkTest, destroy_default_root
requires('gui')
class LabeledScaleTest(AbstractTkTest, unittest.TestCase):
def tearDown(self):
self.root.update_idletasks()
super().tearDown()
def test_widget_destroy(self):
# automatically created variable
x = ttk.LabeledScale(self.root)
var = x._variable._name
x.destroy()
self.assertRaises(tkinter.TclError, x.tk.globalgetvar, var)
# manually created variable
myvar = tkinter.DoubleVar(self.root)
name = myvar._name
x = ttk.LabeledScale(self.root, variable=myvar)
x.destroy()
if self.wantobjects:
self.assertEqual(x.tk.globalgetvar(name), myvar.get())
else:
self.assertEqual(float(x.tk.globalgetvar(name)), myvar.get())
del myvar
self.assertRaises(tkinter.TclError, x.tk.globalgetvar, name)
# checking that the tracing callback is properly removed
myvar = tkinter.IntVar(self.root)
# LabeledScale will start tracing myvar
x = ttk.LabeledScale(self.root, variable=myvar)
x.destroy()
# Unless the tracing callback was removed, creating a new
# LabeledScale with the same var will cause an error now. This
# happens because the variable will be set to (possibly) a new
# value which causes the tracing callback to be called and then
# it tries calling instance attributes not yet defined.
ttk.LabeledScale(self.root, variable=myvar)
if hasattr(sys, 'last_type'):
self.assertNotEqual(sys.last_type, tkinter.TclError)
def test_initialization_no_master(self):
# no master passing
with swap_attr(tkinter, '_default_root', None), \
swap_attr(tkinter, '_support_default_root', True):
try:
x = ttk.LabeledScale()
self.assertIsNotNone(tkinter._default_root)
self.assertEqual(x.master, tkinter._default_root)
self.assertEqual(x.tk, tkinter._default_root.tk)
x.destroy()
finally:
destroy_default_root()
def test_initialization(self):
# master passing
master = tkinter.Frame(self.root)
x = ttk.LabeledScale(master)
self.assertEqual(x.master, master)
x.destroy()
# variable initialization/passing
passed_expected = (('0', 0), (0, 0), (10, 10),
(-1, -1), (sys.maxsize + 1, sys.maxsize + 1),
(2.5, 2), ('2.5', 2))
for pair in passed_expected:
x = ttk.LabeledScale(self.root, from_=pair[0])
self.assertEqual(x.value, pair[1])
x.destroy()
x = ttk.LabeledScale(self.root, from_=None)
self.assertRaises((ValueError, tkinter.TclError), x._variable.get)
x.destroy()
# variable should have its default value set to the from_ value
myvar = tkinter.DoubleVar(self.root, value=20)
x = ttk.LabeledScale(self.root, variable=myvar)
self.assertEqual(x.value, 0)
x.destroy()
# check that it is really using a DoubleVar
x = ttk.LabeledScale(self.root, variable=myvar, from_=0.5)
self.assertEqual(x.value, 0.5)
self.assertEqual(x._variable._name, myvar._name)
x.destroy()
        # widget positioning
def check_positions(scale, scale_pos, label, label_pos):
self.assertEqual(scale.pack_info()['side'], scale_pos)
self.assertEqual(label.place_info()['anchor'], label_pos)
x = ttk.LabeledScale(self.root, compound='top')
check_positions(x.scale, 'bottom', x.label, 'n')
x.destroy()
x = ttk.LabeledScale(self.root, compound='bottom')
check_positions(x.scale, 'top', x.label, 's')
x.destroy()
# invert default positions
x = ttk.LabeledScale(self.root, compound='unknown')
check_positions(x.scale, 'top', x.label, 's')
x.destroy()
x = ttk.LabeledScale(self.root) # take default positions
check_positions(x.scale, 'bottom', x.label, 'n')
x.destroy()
# extra, and invalid, kwargs
self.assertRaises(tkinter.TclError, ttk.LabeledScale, master, a='b')
def test_horizontal_range(self):
lscale = ttk.LabeledScale(self.root, from_=0, to=10)
lscale.pack()
lscale.wait_visibility()
lscale.update()
linfo_1 = lscale.label.place_info()
prev_xcoord = lscale.scale.coords()[0]
self.assertEqual(prev_xcoord, int(linfo_1['x']))
# change range to: from -5 to 5. This should change the x coord of
# the scale widget, since 0 is at the middle of the new
# range.
lscale.scale.configure(from_=-5, to=5)
# The following update is needed since the test doesn't use mainloop,
# at the same time this shouldn't affect test outcome
lscale.update()
curr_xcoord = lscale.scale.coords()[0]
self.assertNotEqual(prev_xcoord, curr_xcoord)
# the label widget should have been repositioned too
linfo_2 = lscale.label.place_info()
self.assertEqual(lscale.label['text'], 0 if self.wantobjects else '0')
self.assertEqual(curr_xcoord, int(linfo_2['x']))
# change the range back
lscale.scale.configure(from_=0, to=10)
self.assertNotEqual(prev_xcoord, curr_xcoord)
self.assertEqual(prev_xcoord, int(linfo_1['x']))
lscale.destroy()
def test_variable_change(self):
x = ttk.LabeledScale(self.root)
x.pack()
x.wait_visibility()
x.update()
curr_xcoord = x.scale.coords()[0]
newval = x.value + 1
x.value = newval
# The following update is needed since the test doesn't use mainloop,
# at the same time this shouldn't affect test outcome
x.update()
self.assertEqual(x.value, newval)
self.assertEqual(x.label['text'],
newval if self.wantobjects else str(newval))
self.assertEqual(float(x.scale.get()), newval)
self.assertGreater(x.scale.coords()[0], curr_xcoord)
self.assertEqual(x.scale.coords()[0],
int(x.label.place_info()['x']))
# value outside range
if self.wantobjects:
conv = lambda x: x
else:
conv = int
        x.value = conv(x.scale['to']) + 1  # no change should happen
x.update()
self.assertEqual(x.value, newval)
self.assertEqual(conv(x.label['text']), newval)
self.assertEqual(float(x.scale.get()), newval)
self.assertEqual(x.scale.coords()[0],
int(x.label.place_info()['x']))
# non-integer value
x.value = newval = newval + 1.5
x.update()
self.assertEqual(x.value, int(newval))
self.assertEqual(conv(x.label['text']), int(newval))
self.assertEqual(float(x.scale.get()), newval)
x.destroy()
def test_resize(self):
x = ttk.LabeledScale(self.root)
x.pack(expand=True, fill='both')
x.wait_visibility()
x.update()
width, height = x.master.winfo_width(), x.master.winfo_height()
width_new, height_new = width * 2, height * 2
x.value = 3
x.update()
x.master.wm_geometry("%dx%d" % (width_new, height_new))
self.assertEqual(int(x.label.place_info()['x']),
x.scale.coords()[0])
# Reset geometry
x.master.wm_geometry("%dx%d" % (width, height))
x.destroy()
class OptionMenuTest(AbstractTkTest, unittest.TestCase):
def setUp(self):
super().setUp()
self.textvar = tkinter.StringVar(self.root)
def tearDown(self):
del self.textvar
super().tearDown()
def test_widget_destroy(self):
var = tkinter.StringVar(self.root)
optmenu = ttk.OptionMenu(self.root, var)
name = var._name
optmenu.update_idletasks()
optmenu.destroy()
self.assertEqual(optmenu.tk.globalgetvar(name), var.get())
del var
self.assertRaises(tkinter.TclError, optmenu.tk.globalgetvar, name)
def test_initialization(self):
self.assertRaises(tkinter.TclError,
ttk.OptionMenu, self.root, self.textvar, invalid='thing')
optmenu = ttk.OptionMenu(self.root, self.textvar, 'b', 'a', 'b')
self.assertEqual(optmenu._variable.get(), 'b')
self.assertTrue(optmenu['menu'])
self.assertTrue(optmenu['textvariable'])
optmenu.destroy()
def test_menu(self):
items = ('a', 'b', 'c')
default = 'a'
optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
found_default = False
for i in range(len(items)):
value = optmenu['menu'].entrycget(i, 'value')
self.assertEqual(value, items[i])
if value == default:
found_default = True
self.assertTrue(found_default)
optmenu.destroy()
# default shouldn't be in menu if it is not part of values
default = 'd'
optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
curr = None
i = 0
while True:
last, curr = curr, optmenu['menu'].entryconfigure(i, 'value')
if last == curr:
# no more menu entries
break
self.assertNotEqual(curr, default)
i += 1
self.assertEqual(i, len(items))
# check that variable is updated correctly
optmenu.pack()
optmenu.wait_visibility()
optmenu['menu'].invoke(0)
self.assertEqual(optmenu._variable.get(), items[0])
# changing to an invalid index shouldn't change the variable
self.assertRaises(tkinter.TclError, optmenu['menu'].invoke, -1)
self.assertEqual(optmenu._variable.get(), items[0])
optmenu.destroy()
# specifying a callback
success = []
def cb_test(item):
self.assertEqual(item, items[1])
success.append(True)
optmenu = ttk.OptionMenu(self.root, self.textvar, 'a', command=cb_test,
*items)
optmenu['menu'].invoke(1)
if not success:
self.fail("Menu callback not invoked")
optmenu.destroy()
tests_gui = (LabeledScaleTest, OptionMenuTest)
if __name__ == "__main__":
run_unittest(*tests_gui)
#!/usr/bin/env python
"""
====================
Generators - Classic
====================
Unit tests for various classic graph generators in generators/classic.py
"""
from nose.tools import *
# The tests below reference both ``networkx.exception`` and ``nx.DiGraph``, so bind both
# names explicitly in addition to the star import.
import networkx
import networkx as nx
from networkx import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
is_isomorphic = graph_could_be_isomorphic
class TestGeneratorClassic():
def test_balanced_tree(self):
        # balanced_tree(r,h) is a tree with (r**(h+1)-1)/(r-1) nodes
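        # e.g. r=2, h=2 gives (2**3 - 1)/(2 - 1) = 7 nodes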
for r,h in [(2,2),(3,3),(6,2)]:
t=balanced_tree(r,h)
order=t.order()
assert_true(order==(r**(h+1)-1)/(r-1))
assert_true(is_connected(t))
assert_true(t.size()==order-1)
dh = degree_histogram(t)
assert_equal(dh[0],0) # no nodes of 0
assert_equal(dh[1],r**h) # nodes of degree 1 are leaves
assert_equal(dh[r],1) # root is degree r
assert_equal(dh[r+1],order-r**h-1)# everyone else is degree r+1
assert_equal(len(dh),r+2)
def test_balanced_tree_star(self):
# balanced_tree(r,1) is the r-star
t=balanced_tree(r=2,h=1)
assert_true(is_isomorphic(t,star_graph(2)))
t=balanced_tree(r=5,h=1)
assert_true(is_isomorphic(t,star_graph(5)))
t=balanced_tree(r=10,h=1)
assert_true(is_isomorphic(t,star_graph(10)))
def test_full_rary_tree(self):
r=2
n=9
t=full_rary_tree(r,n)
assert_equal(t.order(),n)
assert_true(is_connected(t))
dh = degree_histogram(t)
assert_equal(dh[0],0) # no nodes of 0
assert_equal(dh[1],5) # nodes of degree 1 are leaves
assert_equal(dh[r],1) # root is degree r
assert_equal(dh[r+1],9-5-1) # everyone else is degree r+1
assert_equal(len(dh),r+2)
def test_full_rary_tree_balanced(self):
t=full_rary_tree(2,15)
th=balanced_tree(2,3)
assert_true(is_isomorphic(t,th))
def test_full_rary_tree_path(self):
t=full_rary_tree(1,10)
assert_true(is_isomorphic(t,path_graph(10)))
def test_full_rary_tree_empty(self):
t=full_rary_tree(0,10)
assert_true(is_isomorphic(t,empty_graph(10)))
t=full_rary_tree(3,0)
assert_true(is_isomorphic(t,empty_graph(0)))
def test_full_rary_tree_3_20(self):
t=full_rary_tree(3,20)
assert_equal(t.order(),20)
def test_barbell_graph(self):
# number of nodes = 2*m1 + m2 (two m1-complete graphs joined by an m2-path)
# number of edges = 2*number_of_edges(m1-complete graph) + m2 + 1
m1=3; m2=5
b=barbell_graph(m1,m2)
assert_true(number_of_nodes(b)==2*m1+m2)
assert_true(number_of_edges(b)==m1*(m1-1) + m2 + 1)
assert_equal(b.name, 'barbell_graph(3,5)')
m1=4; m2=10
b=barbell_graph(m1,m2)
assert_true(number_of_nodes(b)==2*m1+m2)
assert_true(number_of_edges(b)==m1*(m1-1) + m2 + 1)
assert_equal(b.name, 'barbell_graph(4,10)')
m1=3; m2=20
b=barbell_graph(m1,m2)
assert_true(number_of_nodes(b)==2*m1+m2)
assert_true(number_of_edges(b)==m1*(m1-1) + m2 + 1)
assert_equal(b.name, 'barbell_graph(3,20)')
# Raise NetworkXError if m1<2
m1=1; m2=20
assert_raises(networkx.exception.NetworkXError, barbell_graph, m1, m2)
# Raise NetworkXError if m2<0
m1=5; m2=-2
assert_raises(networkx.exception.NetworkXError, barbell_graph, m1, m2)
# barbell_graph(2,m) = path_graph(m+4)
m1=2; m2=5
b=barbell_graph(m1,m2)
assert_true(is_isomorphic(b, path_graph(m2+4)))
m1=2; m2=10
b=barbell_graph(m1,m2)
assert_true(is_isomorphic(b, path_graph(m2+4)))
m1=2; m2=20
b=barbell_graph(m1,m2)
assert_true(is_isomorphic(b, path_graph(m2+4)))
assert_raises(networkx.exception.NetworkXError, barbell_graph, m1, m2,
create_using=DiGraph())
mb=barbell_graph(m1, m2, create_using=MultiGraph())
assert_true(mb.edges()==b.edges())
def test_complete_graph(self):
# complete_graph(m) is a connected graph with
# m nodes and m*(m-1)/2 edges
for m in [0, 1, 3, 5]:
g = complete_graph(m)
assert_true(number_of_nodes(g) == m)
assert_true(number_of_edges(g) == m * (m - 1) // 2)
mg=complete_graph(m, create_using=MultiGraph())
assert_true(mg.edges()==g.edges())
def test_complete_digraph(self):
# complete_graph(m, create_using=DiGraph()) is a connected digraph with
# m nodes and m*(m-1) edges
for m in [0, 1, 3, 5]:
g = complete_graph(m,create_using=nx.DiGraph())
assert_true(number_of_nodes(g) == m)
assert_true(number_of_edges(g) == m * (m - 1))
def test_circular_ladder_graph(self):
G=circular_ladder_graph(5)
assert_raises(networkx.exception.NetworkXError, circular_ladder_graph,
5, create_using=DiGraph())
mG=circular_ladder_graph(5, create_using=MultiGraph())
assert_equal(mG.edges(), G.edges())
def test_cycle_graph(self):
G=cycle_graph(4)
assert_equal(sorted(G.edges()), [(0, 1), (0, 3), (1, 2), (2, 3)])
mG=cycle_graph(4, create_using=MultiGraph())
assert_equal(sorted(mG.edges()), [(0, 1), (0, 3), (1, 2), (2, 3)])
G=cycle_graph(4, create_using=DiGraph())
assert_false(G.has_edge(2,1))
assert_true(G.has_edge(1,2))
def test_dorogovtsev_goltsev_mendes_graph(self):
G=dorogovtsev_goltsev_mendes_graph(0)
assert_equal(G.edges(), [(0, 1)])
assert_equal(G.nodes(), [0, 1])
G=dorogovtsev_goltsev_mendes_graph(1)
assert_equal(G.edges(), [(0, 1), (0, 2), (1, 2)])
assert_equal(average_clustering(G), 1.0)
assert_equal(list(triangles(G).values()), [1, 1, 1])
G=dorogovtsev_goltsev_mendes_graph(10)
assert_equal(number_of_nodes(G), 29526)
assert_equal(number_of_edges(G), 59049)
assert_equal(G.degree(0), 1024)
assert_equal(G.degree(1), 1024)
assert_equal(G.degree(2), 1024)
assert_raises(networkx.exception.NetworkXError,
dorogovtsev_goltsev_mendes_graph, 7,
create_using=DiGraph())
assert_raises(networkx.exception.NetworkXError,
dorogovtsev_goltsev_mendes_graph, 7,
create_using=MultiGraph())
def test_empty_graph(self):
G=empty_graph()
assert_equal(number_of_nodes(G), 0)
G=empty_graph(42)
assert_equal(number_of_nodes(G), 42)
assert_equal(number_of_edges(G), 0)
assert_equal(G.name, 'empty_graph(42)')
# create empty digraph
G=empty_graph(42,create_using=DiGraph(name="duh"))
assert_equal(number_of_nodes(G), 42)
assert_equal(number_of_edges(G), 0)
assert_equal(G.name, 'empty_graph(42)')
assert_true(isinstance(G,DiGraph))
# create empty multigraph
G=empty_graph(42,create_using=MultiGraph(name="duh"))
assert_equal(number_of_nodes(G), 42)
assert_equal(number_of_edges(G), 0)
assert_equal(G.name, 'empty_graph(42)')
assert_true(isinstance(G,MultiGraph))
# create empty graph from another
pete=petersen_graph()
G=empty_graph(42,create_using=pete)
assert_equal(number_of_nodes(G), 42)
assert_equal(number_of_edges(G), 0)
assert_equal(G.name, 'empty_graph(42)')
assert_true(isinstance(G,Graph))
def test_grid_2d_graph(self):
n=5;m=6
G=grid_2d_graph(n,m)
assert_equal(number_of_nodes(G), n*m)
assert_equal(degree_histogram(G), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
DG=grid_2d_graph(n,m, create_using=DiGraph())
assert_equal(DG.succ, G.adj)
assert_equal(DG.pred, G.adj)
MG=grid_2d_graph(n,m, create_using=MultiGraph())
assert_equal(MG.edges(), G.edges())
def test_grid_graph(self):
"""grid_graph([n,m]) is a connected simple graph with the
following properties:
number_of_nodes=n*m
degree_histogram=[0,0,4,2*(n+m)-8,(n-2)*(m-2)]
"""
for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]:
dim=[n,m]
g=grid_graph(dim)
assert_equal(number_of_nodes(g), n*m)
assert_equal(degree_histogram(g), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
assert_equal(dim,[n,m])
for n, m in [(1, 5), (5, 1)]:
dim=[n,m]
g=grid_graph(dim)
assert_equal(number_of_nodes(g), n*m)
assert_true(is_isomorphic(g,path_graph(5)))
assert_equal(dim,[n,m])
# mg=grid_graph([n,m], create_using=MultiGraph())
# assert_equal(mg.edges(), g.edges())
def test_hypercube_graph(self):
for n, G in [(0, null_graph()), (1, path_graph(2)),
(2, cycle_graph(4)), (3, cubical_graph())]:
g=hypercube_graph(n)
assert_true(is_isomorphic(g, G))
g=hypercube_graph(4)
assert_equal(degree_histogram(g), [0, 0, 0, 0, 16])
g=hypercube_graph(5)
assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 32])
g=hypercube_graph(6)
assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 0, 64])
# mg=hypercube_graph(6, create_using=MultiGraph())
# assert_equal(mg.edges(), g.edges())
def test_ladder_graph(self):
for i, G in [(0, empty_graph(0)), (1, path_graph(2)),
(2, hypercube_graph(2)), (10, grid_graph([2,10]))]:
assert_true(is_isomorphic(ladder_graph(i), G))
assert_raises(networkx.exception.NetworkXError,
ladder_graph, 2, create_using=DiGraph())
g = ladder_graph(2)
mg=ladder_graph(2, create_using=MultiGraph())
assert_equal(mg.edges(), g.edges())
def test_lollipop_graph(self):
# number of nodes = m1 + m2
# number of edges = number_of_edges(complete_graph(m1)) + m2
for m1, m2 in [(3, 5), (4, 10), (3, 20)]:
b=lollipop_graph(m1,m2)
assert_equal(number_of_nodes(b), m1+m2)
assert_equal(number_of_edges(b), m1*(m1-1)/2 + m2)
assert_equal(b.name,
'lollipop_graph(' + str(m1) + ',' + str(m2) + ')')
# Raise NetworkXError if m<2
assert_raises(networkx.exception.NetworkXError,
lollipop_graph, 1, 20)
# Raise NetworkXError if n<0
assert_raises(networkx.exception.NetworkXError,
lollipop_graph, 5, -2)
# lollipop_graph(2,m) = path_graph(m+2)
for m1, m2 in [(2, 5), (2, 10), (2, 20)]:
b=lollipop_graph(m1,m2)
assert_true(is_isomorphic(b, path_graph(m2+2)))
assert_raises(networkx.exception.NetworkXError,
lollipop_graph, m1, m2, create_using=DiGraph())
mb=lollipop_graph(m1, m2, create_using=MultiGraph())
assert_true(mb.edges() == b.edges())
def test_null_graph(self):
assert_equal(number_of_nodes(null_graph()), 0)
def test_path_graph(self):
p=path_graph(0)
assert_true(is_isomorphic(p, null_graph()))
assert_equal(p.name, 'path_graph(0)')
p=path_graph(1)
assert_true(is_isomorphic( p, empty_graph(1)))
assert_equal(p.name, 'path_graph(1)')
p=path_graph(10)
assert_true(is_connected(p))
assert_equal(sorted(list(p.degree().values())),
[1, 1, 2, 2, 2, 2, 2, 2, 2, 2])
assert_equal(p.order()-1, p.size())
dp=path_graph(3, create_using=DiGraph())
assert_true(dp.has_edge(0,1))
assert_false(dp.has_edge(1,0))
mp=path_graph(10, create_using=MultiGraph())
assert_true(mp.edges()==p.edges())
def test_periodic_grid_2d_graph(self):
g=grid_2d_graph(0,0, periodic=True)
assert_equal(g.degree(), {})
for m, n, G in [(2, 2, cycle_graph(4)), (1, 7, cycle_graph(7)),
(7, 1, cycle_graph(7)), (2, 5, circular_ladder_graph(5)),
(5, 2, circular_ladder_graph(5)), (2, 4, cubical_graph()),
(4, 2, cubical_graph())]:
g=grid_2d_graph(m,n, periodic=True)
assert_true(is_isomorphic(g, G))
DG=grid_2d_graph(4, 2, periodic=True, create_using=DiGraph())
assert_equal(DG.succ,g.adj)
assert_equal(DG.pred,g.adj)
MG=grid_2d_graph(4, 2, periodic=True, create_using=MultiGraph())
assert_equal(MG.edges(),g.edges())
def test_star_graph(self):
assert_true(is_isomorphic(star_graph(0), empty_graph(1)))
assert_true(is_isomorphic(star_graph(1), path_graph(2)))
assert_true(is_isomorphic(star_graph(2), path_graph(3)))
s=star_graph(10)
assert_equal(sorted(list(s.degree().values())),
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10])
assert_raises(networkx.exception.NetworkXError,
star_graph, 10, create_using=DiGraph())
ms=star_graph(10, create_using=MultiGraph())
assert_true(ms.edges()==s.edges())
def test_trivial_graph(self):
assert_equal(number_of_nodes(trivial_graph()), 1)
def test_wheel_graph(self):
for n, G in [(0, null_graph()), (1, empty_graph(1)),
(2, path_graph(2)), (3, complete_graph(3)),
(4, complete_graph(4))]:
g=wheel_graph(n)
assert_true(is_isomorphic( g, G))
assert_equal(g.name, 'wheel_graph(4)')
g=wheel_graph(10)
assert_equal(sorted(list(g.degree().values())),
[3, 3, 3, 3, 3, 3, 3, 3, 3, 9])
assert_raises(networkx.exception.NetworkXError,
wheel_graph, 10, create_using=DiGraph())
mg=wheel_graph(10, create_using=MultiGraph())
assert_equal(mg.edges(), g.edges())
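# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the counts
# asserted above follow from two small closed forms.  A balanced tree with
# branching factor r and height h has sum(r**i for i in range(h+1)) nodes,
# i.e. the geometric series (r**(h+1)-1)//(r-1), and a complete graph on m
# nodes has m*(m-1)//2 edges.
if __name__ == '__main__':
    for r, h in [(2, 2), (3, 3), (6, 2)]:
        assert sum(r**i for i in range(h + 1)) == (r**(h + 1) - 1) // (r - 1)
    for m in [0, 1, 3, 5]:
        assert sum(range(m)) == m * (m - 1) // 2
    print('counting formulas check out')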
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for report_utils.py."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
from aqt.utils import report_utils
EventSeries = report_utils.EventSeries
class ReportUtilsTest(parameterized.TestCase):
def setUp(self):
super(ReportUtilsTest, self).setUp()
self.event_series_a = EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([1, 2, 3, 4, 5, 6]))
self.event_series_b = EventSeries(
name='test_series_b',
steps=onp.array([0, 2, 4, 6, 8, 10, 12]),
values=onp.array([1, 2, 3, 0, 0, 3, 6]))
self.event_series_with_nans = EventSeries(
name='test_series_with_nans',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([1, 2, 3, 4, onp.nan, onp.nan]))
self.eval_freq = 2
self.events_dict = {
'a': self.event_series_a,
'b': self.event_series_b,
'with_nans': self.event_series_with_nans
}
def assertEventSeriesEqual(self, x, y):
self.assertEqual(x.name, y.name)
onp.testing.assert_array_almost_equal(x.steps, y.steps)
onp.testing.assert_array_almost_equal(x.values, y.values)
if x.wall_times is None:
self.assertEqual(x.wall_times, y.wall_times)
else:
onp.testing.assert_array_almost_equal(x.wall_times, y.wall_times)
@parameterized.named_parameters(
dict(
testcase_name='wdsz_1_st_0',
window_size_in_steps=1,
step=0,
exp=1,
),
dict(
testcase_name='wdsz_2_st_1',
window_size_in_steps=2,
step=1,
exp=(1 + 2) / 2,
),
dict(
testcase_name='wdsz_2_st_2',
window_size_in_steps=2,
step=2,
exp=2,
),
dict(
testcase_name='wdsz_4_st_0',
window_size_in_steps=4,
step=0,
exp=(1 + 2) / 2,
),
dict(
testcase_name='wdsz_4_st_2',
window_size_in_steps=4,
step=2,
exp=(1 + 2 + 3) / 3,
),
dict(
testcase_name='wdsz_12_st_6',
window_size_in_steps=12,
step=6,
exp=(1 + 2 + 3 + 4 + 5 + 6) / 6,
),
dict(
testcase_name='wdsz_12_st_8',
window_size_in_steps=12,
step=8,
exp=(2 + 3 + 4 + 5 + 6) / 5,
),
)
def test_apply_smoothing_about_step_with_rectangular_kernel(
self, window_size_in_steps, step, exp):
smoothing_kernel = report_utils.SmoothingKernel.RECTANGULAR
rectangular_kernel_fn = smoothing_kernel.get_func(
window_size_in_steps=window_size_in_steps)
res = report_utils.apply_smoothing_about_step(self.event_series_a, step,
rectangular_kernel_fn)
self.assertAlmostEqual(res, exp)
@parameterized.named_parameters(
dict(
testcase_name='wdsz_1_st_0',
window_size_in_steps=1,
step=0,
exp=1,
),
dict(
testcase_name='wdsz_3_st_1',
window_size_in_steps=3,
step=1,
exp=(1 + 2) / 2,
),
dict(
testcase_name='wdsz_2_st_2',
window_size_in_steps=2,
step=2,
exp=2,
),
dict(
testcase_name='wdsz_4_st_0',
window_size_in_steps=4,
step=0,
exp=1,
),
dict(
testcase_name='wdsz_4_st_2',
window_size_in_steps=4,
step=2,
exp=2,
),
dict(
testcase_name='wdsz_12_st_6',
window_size_in_steps=12,
step=6,
exp=(2 * 2 + 3 * 4 + 4 * 6 + 5 * 4 + 6 * 2) / 18,
),
dict(
testcase_name='wdsz_12_st_8',
window_size_in_steps=12,
step=8,
exp=(3 * 2 + 4 * 4 + 5 * 6 + 6 * 4) / 16,
),
)
def test_apply_smoothing_about_step_with_triangular_kernel(
self, window_size_in_steps, step, exp):
smoothing_kernel = report_utils.SmoothingKernel.TRIANGULAR
kernel_fn = smoothing_kernel.get_func(
window_size_in_steps=window_size_in_steps)
res = report_utils.apply_smoothing_about_step(self.event_series_a, step,
kernel_fn)
self.assertAlmostEqual(res, exp)
@parameterized.named_parameters(
dict(
testcase_name='wdsz_0',
event_series_key='a',
window_size_in_steps=0,
step=0,
),
dict(
testcase_name='with_nans',
event_series_key='with_nans',
window_size_in_steps=5,
step=0,
),
)
def test_apply_smoothing_about_step_raise_value_error(self, event_series_key,
window_size_in_steps,
step):
smoothing_kernel = report_utils.SmoothingKernel.RECTANGULAR
rectangular_kernel_fn = smoothing_kernel.get_func(
window_size_in_steps=window_size_in_steps)
with self.assertRaises(ValueError):
report_utils.apply_smoothing_about_step(
self.events_dict[event_series_key], step, rectangular_kernel_fn)
@parameterized.named_parameters(
dict(
testcase_name='wdsz_2',
window_size_in_steps=2,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([1, 2, 3, 4, 5, 6])),
),
dict(
testcase_name='wdsz_4',
window_size_in_steps=4,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([(1 + 2) / 2, (1 + 2 + 3) / 3, (2 + 3 + 4) / 3,
(3 + 4 + 5) / 3, (4 + 5 + 6) / 3,
(5 + 6) / 2])),
),
dict(
testcase_name='wdsz_6',
window_size_in_steps=6,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([(1 + 2) / 2, (1 + 2 + 3) / 3, (2 + 3 + 4) / 3,
(3 + 4 + 5) / 3, (4 + 5 + 6) / 3,
(5 + 6) / 2])),
),
dict(
testcase_name='wdsz_12',
window_size_in_steps=12,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([(1 + 2 + 3 + 4) / 4, (1 + 2 + 3 + 4 + 5) / 5,
(1 + 2 + 3 + 4 + 5 + 6) / 6,
(1 + 2 + 3 + 4 + 5 + 6) / 6,
(2 + 3 + 4 + 5 + 6) / 5, (3 + 4 + 5 + 6) / 4])),
),
)
def test_apply_smoothing_with_rectangular_kernel(self, window_size_in_steps,
exp):
smoothing_kernel = report_utils.SmoothingKernel.RECTANGULAR
rectangular_kernel_fn = smoothing_kernel.get_func(
window_size_in_steps=window_size_in_steps)
res = report_utils.apply_smoothing(self.event_series_a,
rectangular_kernel_fn)
self.assertEventSeriesEqual(res, exp)
@parameterized.named_parameters(
dict(
testcase_name='wdsz_2',
window_size_in_steps=2,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([1, 2, 3, 4, 5, 6])),
),
dict(
testcase_name='wdsz_4',
window_size_in_steps=4,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([1, 2, 3, 4, 5, 6])),
),
dict(
testcase_name='wdsz_6',
window_size_in_steps=6,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([(1 * 3 + 2) / 4,
(1 + 2 * 3 + 3) / 5,
(2 + 3 * 3 + 4) / 5,
(3 + 4 * 3 + 5) / 5,
(4 + 5 * 3 + 6) / 5,
(5 + 6 * 3) / 4])),
),
dict(
testcase_name='wdsz_12',
window_size_in_steps=12,
exp=EventSeries(
name='test_series_a',
steps=onp.array([0, 2, 4, 6, 8, 10]),
values=onp.array([(1 * 6 + 2 * 4 + 3 * 2) / 12,
(1 * 4 + 2 * 6 + 3 * 4 + 4 * 2) / 16,
(1 * 2 + 2 * 4 + 3 * 6 + 4 * 4 + 5 * 2) / 18,
(2 * 2 + 3 * 4 + 4 * 6 + 5 * 4 + 6 * 2) / 18,
(3 * 2 + 4 * 4 + 5 * 6 + 6 * 4) / 16,
(4 * 2 + 5 * 4 + 6 * 6) / 12])),
),
)
def test_apply_smoothing_with_triangular_kernel(self, window_size_in_steps,
exp):
smoothing_kernel = report_utils.SmoothingKernel.TRIANGULAR
kernel_fn = smoothing_kernel.get_func(
window_size_in_steps=window_size_in_steps)
res = report_utils.apply_smoothing(self.event_series_a,
kernel_fn)
self.assertEventSeriesEqual(res, exp)
@parameterized.named_parameters(
dict(
testcase_name='two_mins',
event_series_key='b',
early_stop_agg=report_utils.MinOrMax.MIN,
start_step=0,
exp=6),
dict(
testcase_name='min_start_step_4',
event_series_key='b',
early_stop_agg=report_utils.MinOrMax.MIN,
start_step=4,
exp=6),
dict(
testcase_name='min_start_step_10',
event_series_key='b',
early_stop_agg=report_utils.MinOrMax.MIN,
start_step=10,
exp=10),
dict(
testcase_name='max_start_step_0',
event_series_key='b',
early_stop_agg=report_utils.MinOrMax.MAX,
start_step=0,
exp=12),
dict(
testcase_name='with_nans_min_start_step_0',
event_series_key='with_nans',
early_stop_agg=report_utils.MinOrMax.MIN,
start_step=0,
exp=0),
dict(
testcase_name='with_nans_min_start_step_2',
event_series_key='with_nans',
early_stop_agg=report_utils.MinOrMax.MIN,
start_step=2,
exp=2),
dict(
testcase_name='with_nans_min_start_step_after_nans',
event_series_key='with_nans',
early_stop_agg=report_utils.MinOrMax.MIN,
start_step=8,
exp=8),
dict(
testcase_name='with_nans_max_start_step_0',
event_series_key='with_nans',
early_stop_agg=report_utils.MinOrMax.MAX,
start_step=0,
exp=6),
)
def test_find_early_stop_step(self, event_series_key, early_stop_agg,
start_step, exp):
early_stop_func = early_stop_agg.get_func()
res = report_utils.find_early_stop_step(
self.events_dict[event_series_key],
early_stop_func=early_stop_func,
start_step=start_step)
self.assertEqual(res, exp)
def test_find_early_stop_step_raises_value_error_on_too_large_start_step(
self):
start_step = 20
with self.assertRaisesRegex(
ValueError, 'event_series does not have events after start_step.'):
report_utils.find_early_stop_step(
self.event_series_b,
early_stop_func=onp.argmin,
start_step=start_step)
@parameterized.named_parameters(
dict(
testcase_name='min', early_stop_agg=report_utils.MinOrMax.MIN, exp=0),
dict(
testcase_name='max', early_stop_agg=report_utils.MinOrMax.MAX, exp=1),
)
def test_get_early_stop_func(self, early_stop_agg, exp):
early_stop_func = early_stop_agg.get_func()
res = early_stop_func(onp.array([1, 2]))
self.assertEqual(res, exp)
def test_get_early_stop_func_raises_error_on_unknown_agg(self):
with self.assertRaises(AttributeError):
early_stop_agg = 'bad input'
early_stop_agg.get_func()
def test_get_smoothing_kernel_func(self):
smoothing_kernel = report_utils.SmoothingKernel.RECTANGULAR
rect_kernel_func = smoothing_kernel.get_func(window_size_in_steps=3)
self.assertEqual(rect_kernel_func(1), 1.)
self.assertEqual(rect_kernel_func(2), 0.)
self.assertEqual(rect_kernel_func(3), 0.)
def test_get_smoothing_kernel_func_raises_error_on_unknown_agg(self):
smoothing_kernel = 'bad input'
with self.assertRaises(AttributeError):
smoothing_kernel.get_func(window_size_in_steps=3)
def test_get_smoothing_kernel_func_raises_error_on_rect_kernel_without_window_size(
self):
smoothing_kernel = report_utils.SmoothingKernel.RECTANGULAR
with self.assertRaises(ValueError):
smoothing_kernel.get_func(window_size_in_steps=None)
@parameterized.named_parameters(
dict(
testcase_name='no_nans',
event_series_key='a',
start_step=0,
exp=None),
dict(
testcase_name='with_nans',
event_series_key='with_nans',
start_step=0,
exp=8),
dict(
testcase_name='with_nans_start_step_after_nan',
event_series_key='with_nans',
start_step=10,
exp=10),
)
def test_check_for_nans(self, event_series_key, start_step, exp):
first_nan_step = report_utils.check_for_nans(
self.events_dict[event_series_key], start_step=start_step)
self.assertEqual(first_nan_step, exp)
@parameterized.named_parameters(
dict(
testcase_name='no_nans',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 2, 3, 4])),
}
},
exp=None),
dict(
testcase_name='with_nans',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 2, onp.nan, onp.nan])),
}
},
exp=6),
dict(
testcase_name='with_nan_in_multiple_series',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, onp.nan, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 2, onp.nan, onp.nan])),
}
},
exp=4),
)
def test_check_all_events_for_nans(self, all_events, exp):
first_nan_step = report_utils.check_all_events_for_nans(all_events)
self.assertEqual(first_nan_step, exp)
@parameterized.named_parameters(
dict(
testcase_name='no_nans_no_smoothing',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 1, 4, 4])),
}
},
early_stop_step=4,
smoothing=False,
exp_agg_metrics={
'train': {
'acc': 2,
},
'eval': {
'loss': 1,
},
}),
dict(
testcase_name='no_nans_with_smoothing',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 1, 4, 4])),
}
},
early_stop_step=4,
smoothing=True,
exp_agg_metrics={
'train': {
'acc': 2,
},
'eval': {
'loss': 3,
},
}),
dict(
testcase_name='with_nans_no_smoothing',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 1, onp.nan, onp.nan])),
}
},
early_stop_step=4,
smoothing=False,
exp_agg_metrics={
'train': {
'acc': 2,
},
'eval': {
'loss': 1,
},
}),
)
def test_get_agg_metrics_at_step(self, all_events, early_stop_step, smoothing,
exp_agg_metrics):
if smoothing:
smoothing_kernel_fn = report_utils.SmoothingKernel.RECTANGULAR.get_func(
window_size_in_steps=5)
else:
smoothing_kernel_fn = None
agg_metrics = report_utils.get_agg_metrics_at_step(
all_events, early_stop_step, smoothing_kernel_fn)
self.assertEqual(agg_metrics, exp_agg_metrics)
@parameterized.named_parameters(
dict(
testcase_name='no_nans',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 2, 1, 3])),
}
},
exp_stop_step=6,
exp_first_nan_step=None,
exp_agg_metrics={
'train': {
'acc': 3,
},
'eval': {
'loss': 2,
},
},
exp_agg_metrics_unsmoothed={
'train': {
'acc': 3,
},
'eval': {
'loss': 1,
}
}),
dict(
testcase_name='with_nans',
all_events={
'train': {
'acc':
EventSeries(
name='acc',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([0, 1, 2, 3, 4])),
},
'eval': {
'loss':
EventSeries(
name='loss',
steps=onp.array([0, 2, 4, 6, 8]),
values=onp.array([2, 4, 1, onp.nan, onp.nan])),
}
},
exp_stop_step=4,
exp_first_nan_step=6,
exp_agg_metrics=None,
exp_agg_metrics_unsmoothed={
'train': {
'acc': 2,
},
'eval': {
'loss': 1,
}
}),
)
def test_compute_agg_metrics_from_events(self, all_events, exp_stop_step,
exp_first_nan_step, exp_agg_metrics,
exp_agg_metrics_unsmoothed):
agg_metrics_unsmoothed, agg_metrics, stop_step, first_nan_step = report_utils.compute_agg_metrics_from_events(
all_events=all_events,
early_stop_component='eval',
early_stop_attr='loss',
early_stop_agg=report_utils.MinOrMax.MIN,
smoothing_kernel=report_utils.SmoothingKernel.RECTANGULAR,
window_size_in_steps=5,
start_step=2,
)
self.assertEqual(stop_step, exp_stop_step)
self.assertEqual(first_nan_step, exp_first_nan_step)
self.assertEqual(agg_metrics, exp_agg_metrics)
self.assertEqual(agg_metrics_unsmoothed, exp_agg_metrics_unsmoothed)
if __name__ == '__main__':
absltest.main()
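# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the expected
# values in the smoothing tests above are consistent with a rectangular kernel
# that averages every event within window_size/2 steps of the target step, and
# with a triangular kernel whose weight decays linearly,
# w(s) = max(0, window_size/2 - |s - step|).  The reference helper below is a
# hypothetical re-implementation of that weighting, not the report_utils API.
def _reference_triangular_smooth(steps, values, step, window_size_in_steps):
  """Triangular-kernel weighted average of `values` around `step`."""
  half = window_size_in_steps / 2
  weights = [max(0.0, half - abs(s - step)) for s in steps]
  return sum(w * v for w, v in zip(weights, values)) / sum(weights)
# Example: steps 0..10 with values 1..6, window 12, centred at step 6 gives
# (2*2 + 3*4 + 4*6 + 5*4 + 6*2) / 18 == 4.0, matching 'wdsz_12_st_6' above.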
|
|
# coding: utf-8
"""
This file was originally derived from
https://github.com/pypa/pip/blob/3e713708088aedb1cde32f3c94333d6e29aaf86e/src/pip/_internal/pep425tags.py
The following license covers that code:
Copyright (c) 2008-2018 The pip developers (see AUTHORS.txt file)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import os
import ctypes
import re
import platform
if sys.version_info >= (2, 7):
import sysconfig
if sys.version_info < (3,):
str_cls = unicode # noqa
else:
str_cls = str
def _pep425_implementation():
"""
:return:
A 2 character unicode string of the implementation - 'cp' for cpython
or 'pp' for PyPy
"""
return 'pp' if hasattr(sys, 'pypy_version_info') else 'cp'
def _pep425_version():
"""
:return:
A tuple of integers representing the Python version number
"""
if hasattr(sys, 'pypy_version_info'):
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return (sys.version_info[0], sys.version_info[1])
def _pep425_supports_manylinux():
"""
:return:
A boolean indicating if the machine can use manylinux1 packages
"""
try:
import _manylinux
return bool(_manylinux.manylinux1_compatible)
except (ImportError, AttributeError):
pass
# Check for glibc 2.5
try:
proc = ctypes.CDLL(None)
gnu_get_libc_version = proc.gnu_get_libc_version
gnu_get_libc_version.restype = ctypes.c_char_p
ver = gnu_get_libc_version()
if not isinstance(ver, str_cls):
ver = ver.decode('ascii')
match = re.match(r'(\d+)\.(\d+)', ver)
return match and match.group(1) == '2' and int(match.group(2)) >= 5
except (AttributeError):
return False
def _pep425_get_abi():
"""
:return:
A unicode string of the system abi. Will be something like: "cp27m",
"cp33m", etc.
"""
try:
soabi = sysconfig.get_config_var('SOABI')
if soabi:
if soabi.startswith('cpython-'):
return 'cp%s' % soabi.split('-')[1]
return soabi.replace('.', '_').replace('-', '_')
except (IOError, NameError):
pass
impl = _pep425_implementation()
suffix = ''
if impl == 'cp':
suffix += 'm'
if sys.maxunicode == 0x10ffff and sys.version_info < (3, 3):
suffix += 'u'
return '%s%s%s' % (impl, ''.join(map(str_cls, _pep425_version())), suffix)
def _pep425tags():
"""
:return:
A list of 3-element tuples with unicode strings or None:
[0] implementation tag - cp33, pp27, cp26, py2, py2.py3
[1] abi tag - cp26m, None
[2] arch tag - linux_x86_64, macosx_10_10_x86_64, etc
"""
tags = []
versions = []
version_info = _pep425_version()
major = version_info[:-1]
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = _pep425_implementation()
abis = []
abi = _pep425_get_abi()
if abi:
abis.append(abi)
abi3 = _pep425_implementation() == 'cp' and sys.version_info >= (3,)
if abi3:
abis.append('abi3')
abis.append('none')
if sys.platform == 'darwin':
plat_ver = platform.mac_ver()
ver_parts = plat_ver[0].split('.')
minor = int(ver_parts[1])
arch = plat_ver[2]
if sys.maxsize == 2147483647:
arch = 'i386'
arches = []
while minor > 5:
arches.append('macosx_10_%s_%s' % (minor, arch))
arches.append('macosx_10_%s_intel' % (minor,))
arches.append('macosx_10_%s_universal' % (minor,))
minor -= 1
else:
if sys.platform == 'win32':
if 'amd64' in sys.version.lower():
arches = ['win_amd64']
else:
arches = [sys.platform]
elif hasattr(os, 'uname'):
(plat, _, _, _, machine) = os.uname()
plat = plat.lower().replace('/', '')
machine = machine.replace(' ', '_').replace('/', '_')
if plat == 'linux' and sys.maxsize == 2147483647:
machine = 'i686'
arch = '%s_%s' % (plat, machine)
if _pep425_supports_manylinux():
arches = [arch.replace('linux', 'manylinux1'), arch]
else:
arches = [arch]
for abi in abis:
for arch in arches:
tags.append(('%s%s' % (impl, versions[0]), abi, arch))
if abi3:
for version in versions[1:]:
for arch in arches:
tags.append(('%s%s' % (impl, version), 'abi3', arch))
for arch in arches:
tags.append(('py%s' % (versions[0][0]), 'none', arch))
tags.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
tags.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
for i, version in enumerate(versions):
tags.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
tags.append(('py%s' % (version[0]), 'none', 'any'))
tags.append(('py2.py3', 'none', 'any'))
return tags
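# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): _pep425tags()
# yields (implementation, abi, arch) triples ordered from most to least
# specific, e.g. something like ('cp27', 'cp27mu', 'linux_x86_64') down to
# ('py2.py3', 'none', 'any'); the exact values depend on the interpreter and
# platform this runs on.
if __name__ == '__main__':
    for tag in _pep425tags()[:5]:
        print(tag)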
|
|
from sqlalchemy import schema as sa_schema, types as sqltypes, sql
import logging
from .. import compat
import re
from ..compat import string_types
log = logging.getLogger(__name__)
def _render_potential_expr(value, autogen_context):
if isinstance(value, sql.ClauseElement):
if compat.sqla_08:
compile_kw = dict(compile_kwargs={'literal_binds': True})
else:
compile_kw = {}
return "%(prefix)stext(%(sql)r)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"sql": str(
value.compile(dialect=autogen_context['dialect'],
**compile_kw)
)
}
else:
return repr(value)
def _add_table(table, autogen_context):
text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % {
'tablename': table.name,
'prefix': _alembic_autogenerate_prefix(autogen_context),
'args': ',\n'.join(
[col for col in
[_render_column(col, autogen_context) for col in table.c]
if col] +
sorted([rcons for rcons in
[_render_constraint(cons, autogen_context) for cons in
table.constraints]
if rcons is not None
])
)
}
if table.schema:
text += ",\nschema=%r" % table.schema
for k in sorted(table.kwargs):
text += ",\n%s=%r" % (k.replace(" ", "_"), table.kwargs[k])
text += "\n)"
return text
def _drop_table(table, autogen_context):
text = "%(prefix)sdrop_table(%(tname)r" % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": table.name
}
if table.schema:
text += ", schema=%r" % table.schema
text += ")"
return text
def _add_index(index, autogen_context):
"""
Generate Alembic operations for the CREATE INDEX of an
:class:`~sqlalchemy.schema.Index` instance.
"""
from .compare import _get_index_column_names
text = "%(prefix)screate_index('%(name)s', '%(table)s', %(columns)s, "\
"unique=%(unique)r%(schema)s%(kwargs)s)" % {
'prefix': _alembic_autogenerate_prefix(autogen_context),
'name': index.name,
'table': index.table.name,
'columns': _get_index_column_names(index),
'unique': index.unique or False,
'schema': (", schema='%s'" % index.table.schema) if index.table.schema else '',
'kwargs': (', '+', '.join(
["%s=%s" % (key, _render_potential_expr(val, autogen_context))
for key, val in index.kwargs.items()]))\
if len(index.kwargs) else ''
}
return text
def _drop_index(index, autogen_context):
"""
Generate Alembic operations for the DROP INDEX of an
:class:`~sqlalchemy.schema.Index` instance.
"""
text = "%(prefix)sdrop_index('%(name)s', "\
"table_name='%(table_name)s'%(schema)s)" % {
'prefix': _alembic_autogenerate_prefix(autogen_context),
'name': index.name,
'table_name': index.table.name,
'schema': ((", schema='%s'" % index.table.schema)
if index.table.schema else '')
}
return text
def _render_unique_constraint(constraint, autogen_context):
rendered = _user_defined_render("unique", constraint, autogen_context)
if rendered is not False:
return rendered
return _uq_constraint(constraint, autogen_context, False)
def _add_unique_constraint(constraint, autogen_context):
"""
Generate Alembic operations for the ALTER TABLE .. ADD CONSTRAINT ...
UNIQUE of a :class:`~sqlalchemy.schema.UniqueConstraint` instance.
"""
return _uq_constraint(constraint, autogen_context, True)
def _uq_constraint(constraint, autogen_context, alter):
opts = []
if constraint.deferrable:
opts.append(("deferrable", str(constraint.deferrable)))
if constraint.initially:
opts.append(("initially", str(constraint.initially)))
if alter and constraint.table.schema:
opts.append(("schema", str(constraint.table.schema)))
if not alter and constraint.name:
opts.append(("name", constraint.name))
if alter:
args = [repr(constraint.name), repr(constraint.table.name)]
args.append(repr([col.name for col in constraint.columns]))
args.extend(["%s=%r" % (k, v) for k, v in opts])
return "%(prefix)screate_unique_constraint(%(args)s)" % {
'prefix': _alembic_autogenerate_prefix(autogen_context),
'args': ", ".join(args)
}
else:
args = [repr(col.name) for col in constraint.columns]
args.extend(["%s=%r" % (k, v) for k, v in opts])
return "%(prefix)sUniqueConstraint(%(args)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"args": ", ".join(args)
}
def _add_fk_constraint(constraint, autogen_context):
raise NotImplementedError()
def _add_pk_constraint(constraint, autogen_context):
raise NotImplementedError()
def _add_check_constraint(constraint, autogen_context):
raise NotImplementedError()
def _add_constraint(constraint, autogen_context):
"""
Dispatcher for the different types of constraints.
"""
funcs = {
"unique_constraint": _add_unique_constraint,
"foreign_key_constraint": _add_fk_constraint,
"primary_key_constraint": _add_pk_constraint,
"check_constraint": _add_check_constraint,
"column_check_constraint": _add_check_constraint,
}
return funcs[constraint.__visit_name__](constraint, autogen_context)
def _drop_constraint(constraint, autogen_context):
"""
Generate Alembic operations for the ALTER TABLE ... DROP CONSTRAINT
of a :class:`~sqlalchemy.schema.UniqueConstraint` instance.
"""
text = "%(prefix)sdrop_constraint(%(name)r, '%(table_name)s'%(schema)s)" % {
'prefix': _alembic_autogenerate_prefix(autogen_context),
'name': constraint.name,
'table_name': constraint.table.name,
'schema': (", schema='%s'" % constraint.table.schema)
if constraint.table.schema else '',
}
return text
def _add_column(schema, tname, column, autogen_context):
text = "%(prefix)sadd_column(%(tname)r, %(column)s" % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": tname,
"column": _render_column(column, autogen_context)
}
if schema:
text += ", schema=%r" % schema
text += ")"
return text
def _drop_column(schema, tname, column, autogen_context):
text = "%(prefix)sdrop_column(%(tname)r, %(cname)r" % {
"prefix": _alembic_autogenerate_prefix(autogen_context),
"tname": tname,
"cname": column.name
}
if schema:
text += ", schema=%r" % schema
text += ")"
return text
def _modify_col(tname, cname,
autogen_context,
server_default=False,
type_=None,
nullable=None,
existing_type=None,
existing_nullable=None,
existing_server_default=False,
schema=None):
indent = " " * 11
text = "%(prefix)salter_column(%(tname)r, %(cname)r" % {
'prefix': _alembic_autogenerate_prefix(
autogen_context),
'tname': tname,
'cname': cname}
text += ",\n%sexisting_type=%s" % (indent,
_repr_type(existing_type, autogen_context))
if server_default is not False:
rendered = _render_server_default(
server_default, autogen_context)
text += ",\n%sserver_default=%s" % (indent, rendered)
if type_ is not None:
text += ",\n%stype_=%s" % (indent,
_repr_type(type_, autogen_context))
if nullable is not None:
text += ",\n%snullable=%r" % (
indent, nullable,)
if existing_nullable is not None:
text += ",\n%sexisting_nullable=%r" % (
indent, existing_nullable)
if existing_server_default:
rendered = _render_server_default(
existing_server_default,
autogen_context)
text += ",\n%sexisting_server_default=%s" % (
indent, rendered)
if schema:
text += ",\n%sschema=%r" % (indent, schema)
text += ")"
return text
def _user_autogenerate_prefix(autogen_context):
prefix = autogen_context['opts']['user_module_prefix']
if prefix is None:
return _sqlalchemy_autogenerate_prefix(autogen_context)
else:
return prefix
def _sqlalchemy_autogenerate_prefix(autogen_context):
return autogen_context['opts']['sqlalchemy_module_prefix'] or ''
def _alembic_autogenerate_prefix(autogen_context):
return autogen_context['opts']['alembic_module_prefix'] or ''
def _user_defined_render(type_, object_, autogen_context):
if 'opts' in autogen_context and \
'render_item' in autogen_context['opts']:
render = autogen_context['opts']['render_item']
if render:
rendered = render(type_, object_, autogen_context)
if rendered is not False:
return rendered
return False
def _render_column(column, autogen_context):
rendered = _user_defined_render("column", column, autogen_context)
if rendered is not False:
return rendered
opts = []
if column.server_default:
rendered = _render_server_default(
column.server_default, autogen_context
)
if rendered:
opts.append(("server_default", rendered))
if not column.autoincrement:
opts.append(("autoincrement", column.autoincrement))
if column.nullable is not None:
opts.append(("nullable", column.nullable))
# TODO: for non-ascii colname, assign a "key"
return "%(prefix)sColumn(%(name)r, %(type)s, %(kw)s)" % {
'prefix': _sqlalchemy_autogenerate_prefix(autogen_context),
'name': column.name,
'type': _repr_type(column.type, autogen_context),
'kw': ", ".join(["%s=%s" % (kwname, val) for kwname, val in opts])
}
def _render_server_default(default, autogen_context):
rendered = _user_defined_render("server_default", default, autogen_context)
if rendered is not False:
return rendered
if isinstance(default, sa_schema.DefaultClause):
if isinstance(default.arg, string_types):
default = default.arg
else:
default = str(default.arg.compile(
dialect=autogen_context['dialect']))
if isinstance(default, string_types):
# TODO: this is just a hack to get
# tests to pass until we figure out
# WTF sqlite is doing
default = re.sub(r"^'|'$", "", default)
return repr(default)
else:
return None
def _repr_type(type_, autogen_context):
rendered = _user_defined_render("type", type_, autogen_context)
if rendered is not False:
return rendered
mod = type(type_).__module__
imports = autogen_context.get('imports', None)
if mod.startswith("sqlalchemy.dialects"):
dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1)
if imports is not None:
imports.add("from sqlalchemy.dialects import %s" % dname)
return "%s.%r" % (dname, type_)
elif mod.startswith("sqlalchemy"):
prefix = _sqlalchemy_autogenerate_prefix(autogen_context)
return "%s%r" % (prefix, type_)
else:
prefix = _user_autogenerate_prefix(autogen_context)
return "%s%r" % (prefix, type_)
def _render_constraint(constraint, autogen_context):
renderer = _constraint_renderers.get(type(constraint), None)
if renderer:
return renderer(constraint, autogen_context)
else:
return None
def _render_primary_key(constraint, autogen_context):
rendered = _user_defined_render("primary_key", constraint, autogen_context)
if rendered is not False:
return rendered
if not constraint.columns:
return None
opts = []
if constraint.name:
opts.append(("name", repr(constraint.name)))
return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"args": ", ".join(
[repr(c.key) for c in constraint.columns] +
["%s=%s" % (kwname, val) for kwname, val in opts]
),
}
def _fk_colspec(fk, metadata_schema):
"""Implement a 'safe' version of ForeignKey._get_colspec() that
never tries to resolve the remote table.
"""
if metadata_schema is None:
return fk._get_colspec()
else:
# need to render schema breaking up tokens by hand, since the
# ForeignKeyConstraint here may not actually have a remote
# Table present
tokens = fk._colspec.split(".")
# no schema in the colspec, render it
if len(tokens) == 2:
return "%s.%s" % (metadata_schema, fk._colspec)
else:
return fk._colspec
def _render_foreign_key(constraint, autogen_context):
rendered = _user_defined_render("foreign_key", constraint, autogen_context)
if rendered is not False:
return rendered
opts = []
if constraint.name:
opts.append(("name", repr(constraint.name)))
if constraint.onupdate:
opts.append(("onupdate", repr(constraint.onupdate)))
if constraint.ondelete:
opts.append(("ondelete", repr(constraint.ondelete)))
if constraint.initially:
opts.append(("initially", repr(constraint.initially)))
if constraint.deferrable:
opts.append(("deferrable", repr(constraint.deferrable)))
if constraint.use_alter:
opts.append(("use_alter", repr(constraint.use_alter)))
apply_metadata_schema = constraint.parent.metadata.schema
return "%(prefix)sForeignKeyConstraint([%(cols)s], "\
"[%(refcols)s], %(args)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"cols": ", ".join("'%s'" % f.parent.key for f in constraint.elements),
"refcols": ", ".join(repr(_fk_colspec(f, apply_metadata_schema))
for f in constraint.elements),
"args": ", ".join(
["%s=%s" % (kwname, val) for kwname, val in opts]
),
}
def _render_check_constraint(constraint, autogen_context):
rendered = _user_defined_render("check", constraint, autogen_context)
if rendered is not False:
return rendered
# detect the constraint being part of
# a parent type which is probably in the Table already.
# ideally SQLAlchemy would give us more of a first class
# way to detect this.
if constraint._create_rule and \
hasattr(constraint._create_rule, 'target') and \
isinstance(constraint._create_rule.target,
sqltypes.TypeEngine):
return None
opts = []
if constraint.name:
opts.append(("name", repr(constraint.name)))
return "%(prefix)sCheckConstraint(%(sqltext)r%(opts)s)" % {
"prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
"opts": ", " + (", ".join("%s=%s" % (k, v)
for k, v in opts)) if opts else "",
"sqltext": str(
constraint.sqltext.compile(
dialect=autogen_context['dialect']
)
)
}
_constraint_renderers = {
sa_schema.PrimaryKeyConstraint: _render_primary_key,
sa_schema.ForeignKeyConstraint: _render_foreign_key,
sa_schema.UniqueConstraint: _render_unique_constraint,
sa_schema.CheckConstraint: _render_check_constraint
}
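# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal
# autogen_context carrying only the 'opts' prefixes and 'dialect' key that the
# renderers above look up.  The helper name is hypothetical, and the expected
# output shown in the comment is an assumption rather than a guaranteed
# rendering.
def _example_render_column():
    from sqlalchemy import Column, Integer
    autogen_context = {
        'opts': {
            'sqlalchemy_module_prefix': 'sa.',
            'alembic_module_prefix': 'op.',
            'user_module_prefix': None,
        },
        'dialect': None,
    }
    # Expected to look roughly like: "sa.Column('id', sa.Integer(), nullable=False)"
    return _render_column(Column('id', Integer, nullable=False), autogen_context)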
|
|
"""Support for ESPHome lights."""
from __future__ import annotations
from typing import Any, cast
from aioesphomeapi import APIVersion, LightColorCapability, LightInfo, LightState
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_TRANSITION,
ATTR_WHITE,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_ONOFF,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_UNKNOWN,
COLOR_MODE_WHITE,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import EsphomeEntity, esphome_state_property, platform_async_setup_entry
FLASH_LENGTHS = {FLASH_SHORT: 2, FLASH_LONG: 10}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up ESPHome lights based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="light",
info_type=LightInfo,
entity_type=EsphomeLight,
state_type=LightState,
)
_COLOR_MODE_MAPPING = {
COLOR_MODE_ONOFF: [
LightColorCapability.ON_OFF,
],
COLOR_MODE_BRIGHTNESS: [
LightColorCapability.ON_OFF | LightColorCapability.BRIGHTNESS,
# for compatibility with older clients (2021.8.x)
LightColorCapability.BRIGHTNESS,
],
COLOR_MODE_COLOR_TEMP: [
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.COLOR_TEMPERATURE,
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.COLD_WARM_WHITE,
],
COLOR_MODE_RGB: [
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.RGB,
],
COLOR_MODE_RGBW: [
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.RGB
| LightColorCapability.WHITE,
],
COLOR_MODE_RGBWW: [
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.RGB
| LightColorCapability.WHITE
| LightColorCapability.COLOR_TEMPERATURE,
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.RGB
| LightColorCapability.COLD_WARM_WHITE,
],
COLOR_MODE_WHITE: [
LightColorCapability.ON_OFF
| LightColorCapability.BRIGHTNESS
| LightColorCapability.WHITE
],
}
def _color_mode_to_ha(mode: int) -> str:
"""Convert an esphome color mode to a HA color mode constant.
Chooses the color mode that best matches the feature-set.
"""
candidates = []
for ha_mode, cap_lists in _COLOR_MODE_MAPPING.items():
for caps in cap_lists:
if caps == mode:
# exact match
return ha_mode
if (mode & caps) == caps:
# all requirements met
candidates.append((ha_mode, caps))
if not candidates:
return COLOR_MODE_UNKNOWN
# choose the color mode with the most bits set
candidates.sort(key=lambda key: bin(key[1]).count("1"))
return candidates[-1][0]
def _filter_color_modes(
supported: list[int], features: LightColorCapability
) -> list[int]:
"""Filter the given supported color modes, excluding all values that don't have the requested features."""
return [mode for mode in supported if mode & features]
# https://github.com/PyCQA/pylint/issues/3150 for all @esphome_state_property
# pylint: disable=invalid-overridden-method
class EsphomeLight(EsphomeEntity[LightInfo, LightState], LightEntity):
"""A light implementation for ESPHome."""
@property
def _supports_color_mode(self) -> bool:
"""Return whether the client supports the new color mode system natively."""
return self._api_version >= APIVersion(1, 6)
@esphome_state_property
def is_on(self) -> bool | None:
"""Return true if the light is on."""
return self._state.state
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
data: dict[str, Any] = {"key": self._static_info.key, "state": True}
# The list of color modes that would fit this service call
color_modes = self._native_supported_color_modes
try_keep_current_mode = True
# rgb/brightness input is in range 0-255, but esphome uses 0-1
if (brightness_ha := kwargs.get(ATTR_BRIGHTNESS)) is not None:
data["brightness"] = brightness_ha / 255
color_modes = _filter_color_modes(
color_modes, LightColorCapability.BRIGHTNESS
)
if (rgb_ha := kwargs.get(ATTR_RGB_COLOR)) is not None:
rgb = tuple(x / 255 for x in rgb_ha)
color_bri = max(rgb)
# normalize rgb
data["rgb"] = tuple(x / (color_bri or 1) for x in rgb)
data["color_brightness"] = color_bri
color_modes = _filter_color_modes(color_modes, LightColorCapability.RGB)
try_keep_current_mode = False
if (rgbw_ha := kwargs.get(ATTR_RGBW_COLOR)) is not None:
# pylint: disable=invalid-name
*rgb, w = tuple(x / 255 for x in rgbw_ha) # type: ignore[assignment]
color_bri = max(rgb)
# normalize rgb
data["rgb"] = tuple(x / (color_bri or 1) for x in rgb)
data["white"] = w
data["color_brightness"] = color_bri
color_modes = _filter_color_modes(
color_modes, LightColorCapability.RGB | LightColorCapability.WHITE
)
try_keep_current_mode = False
if (rgbww_ha := kwargs.get(ATTR_RGBWW_COLOR)) is not None:
# pylint: disable=invalid-name
*rgb, cw, ww = tuple(x / 255 for x in rgbww_ha) # type: ignore[assignment]
color_bri = max(rgb)
# normalize rgb
data["rgb"] = tuple(x / (color_bri or 1) for x in rgb)
color_modes = _filter_color_modes(color_modes, LightColorCapability.RGB)
if _filter_color_modes(color_modes, LightColorCapability.COLD_WARM_WHITE):
# Device supports setting cwww values directly
data["cold_white"] = cw
data["warm_white"] = ww
color_modes = _filter_color_modes(
color_modes, LightColorCapability.COLD_WARM_WHITE
)
else:
# need to convert cw+ww part to white+color_temp
white = data["white"] = max(cw, ww)
if white != 0:
min_ct = self.min_mireds
max_ct = self.max_mireds
ct_ratio = ww / (cw + ww)
data["color_temperature"] = min_ct + ct_ratio * (max_ct - min_ct)
color_modes = _filter_color_modes(
color_modes,
LightColorCapability.COLOR_TEMPERATURE | LightColorCapability.WHITE,
)
try_keep_current_mode = False
data["color_brightness"] = color_bri
if (flash := kwargs.get(ATTR_FLASH)) is not None:
data["flash_length"] = FLASH_LENGTHS[flash]
if (transition := kwargs.get(ATTR_TRANSITION)) is not None:
data["transition_length"] = transition
if (color_temp := kwargs.get(ATTR_COLOR_TEMP)) is not None:
data["color_temperature"] = color_temp
if _filter_color_modes(color_modes, LightColorCapability.COLOR_TEMPERATURE):
color_modes = _filter_color_modes(
color_modes, LightColorCapability.COLOR_TEMPERATURE
)
else:
color_modes = _filter_color_modes(
color_modes, LightColorCapability.COLD_WARM_WHITE
)
try_keep_current_mode = False
if (effect := kwargs.get(ATTR_EFFECT)) is not None:
data["effect"] = effect
if (white_ha := kwargs.get(ATTR_WHITE)) is not None:
# ESPHome multiplies brightness and white together for final brightness
# HA only sends `white` in turn_on, and reads total brightness through brightness property
data["brightness"] = white_ha / 255
data["white"] = 1.0
color_modes = _filter_color_modes(
color_modes,
LightColorCapability.BRIGHTNESS | LightColorCapability.WHITE,
)
try_keep_current_mode = False
if self._supports_color_mode and color_modes:
if (
try_keep_current_mode
and self._state is not None
and self._state.color_mode in color_modes
):
# if possible, stay with the color mode that is already set
data["color_mode"] = self._state.color_mode
else:
# otherwise try the color mode with the least complexity (fewest capabilities set)
# popcount with bin() function because it appears to be the best way: https://stackoverflow.com/a/9831671
color_modes.sort(key=lambda mode: bin(mode).count("1"))
data["color_mode"] = color_modes[0]
await self._client.light_command(**data)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
data: dict[str, Any] = {"key": self._static_info.key, "state": False}
if ATTR_FLASH in kwargs:
data["flash_length"] = FLASH_LENGTHS[kwargs[ATTR_FLASH]]
if ATTR_TRANSITION in kwargs:
data["transition_length"] = kwargs[ATTR_TRANSITION]
await self._client.light_command(**data)
@esphome_state_property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
return round(self._state.brightness * 255)
@esphome_state_property
def color_mode(self) -> str | None:
"""Return the color mode of the light."""
if not self._supports_color_mode:
if not (supported := self.supported_color_modes):
return None
return next(iter(supported))
return _color_mode_to_ha(self._state.color_mode)
@esphome_state_property
def rgb_color(self) -> tuple[int, int, int] | None:
"""Return the rgb color value [int, int, int]."""
if not self._supports_color_mode:
return (
round(self._state.red * 255),
round(self._state.green * 255),
round(self._state.blue * 255),
)
return (
round(self._state.red * self._state.color_brightness * 255),
round(self._state.green * self._state.color_brightness * 255),
round(self._state.blue * self._state.color_brightness * 255),
)
@esphome_state_property
def rgbw_color(self) -> tuple[int, int, int, int] | None:
"""Return the rgbw color value [int, int, int, int]."""
white = round(self._state.white * 255)
rgb = cast("tuple[int, int, int]", self.rgb_color)
return (*rgb, white)
@esphome_state_property
def rgbww_color(self) -> tuple[int, int, int, int, int] | None:
"""Return the rgbww color value [int, int, int, int, int]."""
rgb = cast("tuple[int, int, int]", self.rgb_color)
if not _filter_color_modes(
self._native_supported_color_modes, LightColorCapability.COLD_WARM_WHITE
):
# Try to reverse white + color temp to cwww
min_ct = self._static_info.min_mireds
max_ct = self._static_info.max_mireds
color_temp = min(max(self._state.color_temperature, min_ct), max_ct)
white = self._state.white
ww_frac = (color_temp - min_ct) / (max_ct - min_ct)
cw_frac = 1 - ww_frac
return (
*rgb,
round(white * cw_frac / max(cw_frac, ww_frac) * 255),
round(white * ww_frac / max(cw_frac, ww_frac) * 255),
)
return (
*rgb,
round(self._state.cold_white * 255),
round(self._state.warm_white * 255),
)
@esphome_state_property
def color_temp(self) -> float | None: # type: ignore[override]
"""Return the CT color value in mireds."""
return self._state.color_temperature
@esphome_state_property
def effect(self) -> str | None:
"""Return the current effect."""
return self._state.effect
@property
def _native_supported_color_modes(self) -> list[int]:
return self._static_info.supported_color_modes_compat(self._api_version)
@property
def supported_features(self) -> int:
"""Flag supported features."""
flags = SUPPORT_FLASH
# All color modes except UNKNOWN,ON_OFF support transition
modes = self._native_supported_color_modes
if any(m not in (0, LightColorCapability.ON_OFF) for m in modes):
flags |= SUPPORT_TRANSITION
if self._static_info.effects:
flags |= SUPPORT_EFFECT
return flags
@property
def supported_color_modes(self) -> set[str] | None:
"""Flag supported color modes."""
supported = set(map(_color_mode_to_ha, self._native_supported_color_modes))
if COLOR_MODE_ONOFF in supported and len(supported) > 1:
supported.remove(COLOR_MODE_ONOFF)
if COLOR_MODE_BRIGHTNESS in supported and len(supported) > 1:
supported.remove(COLOR_MODE_BRIGHTNESS)
if COLOR_MODE_WHITE in supported and len(supported) == 1:
supported.remove(COLOR_MODE_WHITE)
return supported
@property
def effect_list(self) -> list[str]:
"""Return the list of supported effects."""
return self._static_info.effects
@property
def min_mireds(self) -> float: # type: ignore[override]
"""Return the coldest color_temp that this light supports."""
return self._static_info.min_mireds
@property
def max_mireds(self) -> float: # type: ignore[override]
"""Return the warmest color_temp that this light supports."""
return self._static_info.max_mireds
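# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): _color_mode_to_ha()
# returns an exact capability match when one exists and otherwise the
# candidate with the most capability bits set.  The block is guarded so it
# never executes on import, and the relative imports above keep this module
# from running standalone anyway; the expected value is an assumption.
if __name__ == "__main__":
    demo_capabilities = (
        LightColorCapability.ON_OFF
        | LightColorCapability.BRIGHTNESS
        | LightColorCapability.RGB
    )
    print(_color_mode_to_ha(demo_capabilities))  # expected: COLOR_MODE_RGB ("rgb")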
|
|
from ConfigParser import SafeConfigParser, NoOptionError
import re
import os
import shlex
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
"""
Exception thrown when there is a problem parsing a configuration file.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class PkgNotFound(IOError):
"""Exception raised when a package can not be located."""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def parse_flags(line):
"""
Parse a line from a config file containing compile flags.
Parameters
----------
line : str
A single line containing one or more compile flags.
Returns
-------
d : dict
Dictionary of parsed flags, split into relevant categories.
These categories are the keys of `d`:
* 'include_dirs'
* 'library_dirs'
* 'libraries'
* 'macros'
* 'ignored'
"""
lexer = shlex.shlex(line)
lexer.whitespace_split = True
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
'macros': [], 'ignored': []}
def next_token(t):
if t.startswith('-I'):
if len(t) > 2:
d['include_dirs'].append(t[2:])
else:
t = lexer.get_token()
d['include_dirs'].append(t)
elif t.startswith('-L'):
if len(t) > 2:
d['library_dirs'].append(t[2:])
else:
t = lexer.get_token()
d['library_dirs'].append(t)
elif t.startswith('-l'):
d['libraries'].append(t[2:])
elif t.startswith('-D'):
d['macros'].append(t[2:])
else:
d['ignored'].append(t)
return lexer.get_token()
t = lexer.get_token()
while t:
t = next_token(t)
return d
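# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): parse_flags() buckets
# a pkg-config style flag string into the categories documented above.  The
# helper name below is hypothetical and unused by the rest of the file.
def _example_parse_flags():
    # Expected result:
    # {'include_dirs': ['/usr/include'], 'library_dirs': ['/usr/lib'],
    #  'libraries': ['m'], 'macros': ['NDEBUG'], 'ignored': ['-O2']}
    return parse_flags('-I/usr/include -L/usr/lib -lm -DNDEBUG -O2')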
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
"""
Object containing build information about a library.
Parameters
----------
name : str
The library name.
description : str
Description of the library.
version : str
Version string.
sections : dict
The sections of the configuration file for the library. The keys are
the section headers, the values the text under each header.
vars : class instance
A `VariableSet` instance, which contains ``(name, value)`` pairs for
variables defined in the configuration file for the library.
requires : sequence, optional
The required libraries for the library to be installed.
Notes
-----
All input parameters (except "sections" which is a method) are available as
attributes of the same name.
"""
def __init__(self, name, description, version, sections, vars, requires=None):
self.name = name
self.description = description
if requires:
self.requires = requires
else:
self.requires = []
self.version = version
self._sections = sections
self.vars = vars
def sections(self):
"""
Return the section headers of the config file.
Parameters
----------
None
Returns
-------
keys : list of str
The list of section headers.
"""
return self._sections.keys()
def cflags(self, section="default"):
val = self.vars.interpolate(self._sections[section]['cflags'])
return _escape_backslash(val)
def libs(self, section="default"):
val = self.vars.interpolate(self._sections[section]['libs'])
return _escape_backslash(val)
def __str__(self):
m = ['Name: %s' % self.name]
m.append('Description: %s' % self.description)
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
m.append('Version: %s' % self.version)
return "\n".join(m)
class VariableSet(object):
"""
Container object for the variables defined in a config file.
`VariableSet` can be used as a plain dictionary, with the variable names
as keys.
Parameters
----------
d : dict
Dict of items in the "variables" section of the configuration file.
"""
def __init__(self, d):
self._raw_data = dict([(k, v) for k, v in d.items()])
self._re = {}
self._re_sub = {}
self._init_parse()
def _init_parse(self):
for k, v in self._raw_data.items():
self._init_parse_var(k, v)
def _init_parse_var(self, name, value):
self._re[name] = re.compile(r'\$\{%s\}' % name)
self._re_sub[name] = value
def interpolate(self, value):
# Brute force: we keep interpolating until there is no '${var}' anymore
# or until interpolated string is equal to input string
def _interpolate(value):
for k in self._re.keys():
value = self._re[k].sub(self._re_sub[k], value)
return value
while _VAR.search(value):
nvalue = _interpolate(value)
if nvalue == value:
break
value = nvalue
return value
def variables(self):
"""
Return the list of variable names.
Parameters
----------
None
Returns
-------
names : list of str
The names of all variables in the `VariableSet` instance.
"""
return self._raw_data.keys()
# Emulate a dict to set/get variables values
def __getitem__(self, name):
return self._raw_data[name]
def __setitem__(self, name, value):
self._raw_data[name] = value
self._init_parse_var(name, value)
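# Illustrative example (not in the original source): interpolation resolves
# nested ${var} references until a fixed point is reached, e.g.
#   vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
#   vs.interpolate('-L${libdir} -lnpymath')   ->   '-L/usr/lib -lnpymath'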
def parse_meta(config):
if not config.has_section('meta'):
raise FormatError("No meta section found !")
d = {}
for name, value in config.items('meta'):
d[name] = value
    for k in ['name', 'description', 'version']:
        if k not in d:
            raise FormatError("Option %s (section [meta]) is mandatory, "
                "but not found" % k)
    if 'requires' not in d:
        d['requires'] = []
return d
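# For reference, a hypothetical .ini file consumed by the parsers below might
# look like this (option names are lower-cased by ConfigParser when read):
#   [meta]
#   Name = npymath
#   Description = Portable core math library
#   Version = 0.1
#   [variables]
#   prefix = /usr
#   libdir = ${prefix}/lib
#   [default]
#   cflags = -I${prefix}/include
#   libs = -L${libdir} -lnpymath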
def parse_variables(config):
if not config.has_section('variables'):
raise FormatError("No variables section found !")
d = {}
for name, value in config.items("variables"):
d[name] = value
return VariableSet(d)
def parse_sections(config):
    # Mirrors the inline section parsing in parse_config below: returns
    # ({section: {option: value}}, {section: requires}) for every section
    # other than 'meta' and 'variables'.
    secs = [s for s in config.sections() if s not in ('meta', 'variables')]
    sections = dict((s, dict(config.items(s))) for s in secs)
    requires = dict((s, config.get(s, 'requires'))
                    for s in secs if config.has_option(s, 'requires'))
    return sections, requires
def pkg_to_filename(pkg_name):
return "%s.ini" % pkg_name
def parse_config(filename, dirs=None):
if dirs:
filenames = [os.path.join(d, filename) for d in dirs]
else:
filenames = [filename]
config = SafeConfigParser()
n = config.read(filenames)
    if not n:
raise PkgNotFound("Could not find file(s) %s" % str(filenames))
# Parse meta and variables sections
meta = parse_meta(config)
vars = {}
if config.has_section('variables'):
for name, value in config.items("variables"):
vars[name] = _escape_backslash(value)
# Parse "normal" sections
secs = [s for s in config.sections() if not s in ['meta', 'variables']]
sections = {}
requires = {}
for s in secs:
d = {}
if config.has_option(s, "requires"):
requires[s] = config.get(s, 'requires')
for name, value in config.items(s):
d[name] = value
sections[s] = d
return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
def _read_config(f):
meta, vars, sections, reqs = parse_config(f, dirs)
# recursively add sections and variables of required libraries
for rname, rvalue in reqs.items():
nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
# Update var dict for variables not in 'top' config file
for k, v in nvars.items():
                if k not in vars:
vars[k] = v
# Update sec dict
for oname, ovalue in nsections[rname].items():
sections[rname][oname] += ' %s' % ovalue
return meta, vars, sections, reqs
meta, vars, sections, reqs = _read_config(filenames)
return LibraryInfo(name=meta["name"], description=meta["description"],
version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache to cache LibraryInfo instances creation. To be really
# efficient, the cache should be handled in read_config, since a same file can
# be parsed many time outside LibraryInfo creation, but I doubt this will be a
# problem in practice
_CACHE = {}
def read_config(pkgname, dirs=None):
try:
return _CACHE[pkgname]
except KeyError:
v = _read_config_imp(pkg_to_filename(pkgname), dirs)
_CACHE[pkgname] = v
return v
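# Illustrative usage (assuming an npymath.ini reachable through `dirs`; the
# returned flag strings are examples only):
#   info = read_config('npymath', dirs=['numpy/core/lib/npy-pkg-config', '.'])
#   info.cflags()   ->  e.g. '-I/usr/include'
#   info.libs()     ->  e.g. '-L/usr/lib -lnpymath'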
# TODO:
# - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
import sys
from optparse import OptionParser
import glob
parser = OptionParser()
parser.add_option("--cflags", dest="cflags", action="store_true",
help="output all preprocessor and compiler flags")
parser.add_option("--libs", dest="libs", action="store_true",
help="output all linker flags")
parser.add_option("--use-section", dest="section",
help="use this section instead of default for options")
parser.add_option("--version", dest="version", action="store_true",
help="output version")
parser.add_option("--atleast-version", dest="min_version",
help="Minimal version")
parser.add_option("--list-all", dest="list_all", action="store_true",
help="Minimal version")
parser.add_option("--define-variable", dest="define_variable",
help="Replace variable with the given value")
(options, args) = parser.parse_args(sys.argv)
if len(args) < 2:
raise ValueError("Expect package name on the command line:")
if options.list_all:
files = glob.glob("*.ini")
for f in files:
info = read_config(f)
print "%s\t%s - %s" % (info.name, info.name, info.description)
pkg_name = args[1]
import os
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
else:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
if options.section:
section = options.section
else:
section = "default"
if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
if not m:
raise ValueError("--define-variable option should be of " \
"the form --define-variable=foo=bar")
else:
name = m.group(1)
value = m.group(2)
info.vars[name] = value
if options.cflags:
print info.cflags(section)
if options.libs:
print info.libs(section)
if options.version:
print info.version
if options.min_version:
print info.version >= options.min_version
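# Illustrative command-line use of the pkg-config emulator above (the module
# file name is assumed here):
#   python npy_pkg_config.py --cflags npymath
#   python npy_pkg_config.py --libs --use-section default npymath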
|
|
##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Generic property store tests.
"""
__all__ = [
"PropertyStoreTest",
"propertyName",
"propertyValue",
]
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from txdav.xml import element as davxml
from txdav.idav import IPropertyStore
from txdav.base.propertystore.base import PropertyName
class NonePropertyStoreTest(unittest.TestCase):
# Subclass must define self.propertyStore in setUp().
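    # Illustrative (hypothetical) subclass showing the expected wiring:
    #   class InMemoryPropertyStoreTest(NonePropertyStoreTest):
    #       def setUp(self):
    #           self.propertyStore = InMemoryPropertyStore(...)  # store under test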
def test_interface(self):
try:
verifyObject(IPropertyStore, self.propertyStore)
except BrokenMethodImplementation, e:
self.fail(e)
def test_delete_none(self):
def doDelete():
del self.propertyStore[propertyName("xyzzy")]
self.assertRaises(KeyError, doDelete)
def test_keyInPropertyName(self):
def doGet():
self.propertyStore["xyzzy"]
def doSet():
self.propertyStore["xyzzy"] = propertyValue("Hello, World!")
def doDelete():
del self.propertyStore["xyzzy"]
def doContains():
return "xyzzy" in self.propertyStore
self.assertRaises(TypeError, doGet)
self.assertRaises(TypeError, doSet)
self.assertRaises(TypeError, doDelete)
self.assertRaises(TypeError, doContains)
class PropertyStoreTest(NonePropertyStoreTest):
# Subclass must define self.propertyStore in setUp().
def _changed(self, store):
store.flush()
def _abort(self, store):
store.abort()
@inlineCallbacks
def test_set_get_contains(self):
name = propertyName("test")
value = propertyValue("Hello, World!")
# Test with commit after change
self.propertyStore[name] = value
yield self._changed(self.propertyStore)
self.assertEquals(self.propertyStore.get(name, None), value)
self.failUnless(name in self.propertyStore)
# Test without commit after change
value = propertyValue("Hello, Universe!")
self.propertyStore[name] = value
self.assertEquals(self.propertyStore.get(name, None), value)
self.failUnless(name in self.propertyStore)
@inlineCallbacks
def test_delete_get_contains(self):
# Test with commit after change
name = propertyName("test")
value = propertyValue("Hello, World!")
self.propertyStore[name] = value
yield self._changed(self.propertyStore)
del self.propertyStore[name]
yield self._changed(self.propertyStore)
self.assertEquals(self.propertyStore.get(name, None), None)
self.failIf(name in self.propertyStore)
# Test without commit after change
name = propertyName("test")
value = propertyValue("Hello, Universe!")
self.propertyStore[name] = value
yield self._changed(self.propertyStore)
del self.propertyStore[name]
self.assertEquals(self.propertyStore.get(name, None), None)
self.failIf(name in self.propertyStore)
@inlineCallbacks
def test_peruser(self):
name = propertyName("test")
value1 = propertyValue("Hello, World1!")
value2 = propertyValue("Hello, World2!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
self.propertyStore2[name] = value2
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value2)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore2[name]
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
del self.propertyStore1[name]
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
@inlineCallbacks
def test_peruserShadow(self):
name = propertyName("shadow")
self.propertyStore1.setSpecialProperties((name,), (), ())
self.propertyStore2.setSpecialProperties((name,), (), ())
self.propertyStore3.setSpecialProperties((name,), (), ())
self.propertyStore4.setSpecialProperties((name,), (), ())
value1 = propertyValue("Hello, World1!")
value2 = propertyValue("Hello, World2!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
self.propertyStore2[name] = value2
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value2)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore2[name]
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore1[name]
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
@inlineCallbacks
def test_peruserShadow_delete(self):
"""
Delete a shadowable property that has not been overridden by the sharee.
"""
name = propertyName("shadow")
self.propertyStore1.setSpecialProperties((name,), (), ())
self.propertyStore2.setSpecialProperties((name,), (), ())
self.propertyStore3.setSpecialProperties((name,), (), ())
self.propertyStore4.setSpecialProperties((name,), (), ())
value1 = propertyValue("Hello, World1!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore2[name]
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore1[name]
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
@inlineCallbacks
def test_peruser_global(self):
name = propertyName("global")
self.propertyStore1.setSpecialProperties((), (name,), ())
self.propertyStore2.setSpecialProperties((), (name,), ())
self.propertyStore3.setSpecialProperties((), (name,), ())
self.propertyStore4.setSpecialProperties((), (name,), ())
value1 = propertyValue("Hello, World1!")
value2 = propertyValue("Hello, World2!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
self.propertyStore2[name] = value2
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value2)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value2)
self.assertEquals(self.propertyStore4.get(name, None), value2)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore2[name]
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
@inlineCallbacks
def test_proxy(self):
name = propertyName("test")
value1 = propertyValue("Hello, World1!")
value2 = propertyValue("Hello, World2!")
self.propertyStore3[name] = value1
yield self._changed(self.propertyStore3)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
self.propertyStore4[name] = value2
yield self._changed(self.propertyStore4)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value2)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore4[name]
yield self._changed(self.propertyStore4)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
del self.propertyStore3[name]
yield self._changed(self.propertyStore3)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
@inlineCallbacks
def test_proxyOverride(self):
name = propertyName("override")
self.propertyStore1.setSpecialProperties((), (), (name,))
self.propertyStore2.setSpecialProperties((), (), (name,))
self.propertyStore3.setSpecialProperties((), (), (name,))
self.propertyStore4.setSpecialProperties((), (), (name,))
value1 = propertyValue("Hello, World1!")
value2 = propertyValue("Hello, World2!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
self.propertyStore3[name] = value2
yield self._changed(self.propertyStore3)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value2)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
del self.propertyStore3[name]
yield self._changed(self.propertyStore3)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failUnless(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
del self.propertyStore1[name]
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
@inlineCallbacks
def test_proxyOverrideShadow(self):
name = propertyName("override")
self.propertyStore1.setSpecialProperties((name,), (), (name,))
self.propertyStore2.setSpecialProperties((name,), (), (name,))
self.propertyStore3.setSpecialProperties((name,), (), (name,))
self.propertyStore4.setSpecialProperties((name,), (), (name,))
value1 = propertyValue("Hello, World1!")
value2 = propertyValue("Hello, World2!")
value3 = propertyValue("Hello, World3!")
value4 = propertyValue("Hello, World4!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
self.propertyStore3[name] = value3
yield self._changed(self.propertyStore3)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value3)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
self.propertyStore4[name] = value4
yield self._changed(self.propertyStore4)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value3)
self.assertEquals(self.propertyStore4.get(name, None), value4)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
self.propertyStore2[name] = value2
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value3)
self.assertEquals(self.propertyStore4.get(name, None), value4)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore3[name]
yield self._changed(self.propertyStore3)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value4)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore4[name]
yield self._changed(self.propertyStore4)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value2)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value2)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore2[name]
yield self._changed(self.propertyStore2)
self.assertEquals(self.propertyStore1.get(name, None), value1)
self.assertEquals(self.propertyStore2.get(name, None), value1)
self.assertEquals(self.propertyStore3.get(name, None), value1)
self.assertEquals(self.propertyStore4.get(name, None), value1)
self.failUnless(name in self.propertyStore1)
self.failUnless(name in self.propertyStore2)
self.failUnless(name in self.propertyStore3)
self.failUnless(name in self.propertyStore4)
del self.propertyStore1[name]
yield self._changed(self.propertyStore1)
self.assertEquals(self.propertyStore1.get(name, None), None)
self.assertEquals(self.propertyStore2.get(name, None), None)
self.assertEquals(self.propertyStore3.get(name, None), None)
self.assertEquals(self.propertyStore4.get(name, None), None)
self.failIf(name in self.propertyStore1)
self.failIf(name in self.propertyStore2)
self.failIf(name in self.propertyStore3)
self.failIf(name in self.propertyStore4)
def test_iteration(self):
value = propertyValue("Hello, World!")
names = set(propertyName(str(i)) for i in (1, 2, 3, 4))
for name in names:
self.propertyStore[name] = value
self.assertEquals(set(self.propertyStore.keys()), names)
self.assertEquals(len(self.propertyStore), len(names))
@inlineCallbacks
def test_flush(self):
name = propertyName("test")
value = propertyValue("Hello, World!")
#
# Set value flushes correctly
#
self.propertyStore[name] = value
yield self._changed(self.propertyStore)
yield self._abort(self.propertyStore)
self.assertEquals(self.propertyStore.get(name, None), value)
self.assertEquals(len(self.propertyStore), 1)
#
# Deleted value flushes correctly
#
del self.propertyStore[name]
yield self._changed(self.propertyStore)
yield self._abort(self.propertyStore)
self.assertEquals(self.propertyStore.get(name, None), None)
self.assertEquals(len(self.propertyStore), 0)
@inlineCallbacks
def test_abort(self):
name = propertyName("test")
value = propertyValue("Hello, World!")
self.propertyStore[name] = value
yield self._abort(self.propertyStore)
self.assertEquals(self.propertyStore.get(name, None), None)
self.assertEquals(len(self.propertyStore), 0)
@inlineCallbacks
def test_peruser_keys(self):
name = propertyName("shadow")
self.propertyStore1.setSpecialProperties((name,), (), ())
self.propertyStore2.setSpecialProperties((name,), (), ())
value1 = propertyValue("Hello, World1!")
self.propertyStore1[name] = value1
yield self._changed(self.propertyStore1)
self.failUnless(name in self.propertyStore2.keys())
def propertyName(name):
return PropertyName("http://calendarserver.org/ns/test/", name)
def propertyValue(value):
return davxml.ResponseDescription(value)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._template_spec_versions_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TemplateSpecVersionsOperations:
"""TemplateSpecVersionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
template_spec_name: str,
template_spec_version: str,
template_spec_version_model: "_models.TemplateSpecVersion",
**kwargs: Any
) -> "_models.TemplateSpecVersion":
"""Creates or updates a Template Spec version.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param template_spec_name: Name of the Template Spec.
:type template_spec_name: str
:param template_spec_version: The version of the Template Spec.
:type template_spec_version: str
:param template_spec_version_model: Template Spec Version supplied to the operation.
:type template_spec_version_model:
~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersion
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateSpecVersion, or the result of cls(response)
:rtype: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateSpecVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(template_spec_version_model, 'TemplateSpecVersion')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_spec_name=template_spec_name,
template_spec_version=template_spec_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.TemplateSpecsError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TemplateSpecVersion', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TemplateSpecVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Resources/templateSpecs/{templateSpecName}/versions/{templateSpecVersion}'} # type: ignore
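    # Illustrative call site (assuming a client object that exposes this
    # operation group as `template_spec_versions`; all names below are placeholders):
    #   version = await client.template_spec_versions.create_or_update(
    #       resource_group_name="my-rg",
    #       template_spec_name="my-spec",
    #       template_spec_version="1.0",
    #       template_spec_version_model=template_spec_version_model,
    #   )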
@distributed_trace_async
async def update(
self,
resource_group_name: str,
template_spec_name: str,
template_spec_version: str,
template_spec_version_update_model: Optional["_models.TemplateSpecVersionUpdateModel"] = None,
**kwargs: Any
) -> "_models.TemplateSpecVersion":
"""Updates Template Spec Version tags with specified values.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param template_spec_name: Name of the Template Spec.
:type template_spec_name: str
:param template_spec_version: The version of the Template Spec.
:type template_spec_version: str
:param template_spec_version_update_model: Template Spec Version resource with the tags to be
updated.
:type template_spec_version_update_model:
~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersionUpdateModel
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateSpecVersion, or the result of cls(response)
:rtype: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateSpecVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if template_spec_version_update_model is not None:
_json = self._serialize.body(template_spec_version_update_model, 'TemplateSpecVersionUpdateModel')
else:
_json = None
request = build_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_spec_name=template_spec_name,
template_spec_version=template_spec_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.TemplateSpecsError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TemplateSpecVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Resources/templateSpecs/{templateSpecName}/versions/{templateSpecVersion}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
template_spec_name: str,
template_spec_version: str,
**kwargs: Any
) -> "_models.TemplateSpecVersion":
"""Gets a Template Spec version from a specific Template Spec.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param template_spec_name: Name of the Template Spec.
:type template_spec_name: str
:param template_spec_version: The version of the Template Spec.
:type template_spec_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateSpecVersion, or the result of cls(response)
:rtype: ~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateSpecVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_spec_name=template_spec_name,
template_spec_version=template_spec_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.TemplateSpecsError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TemplateSpecVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Resources/templateSpecs/{templateSpecName}/versions/{templateSpecVersion}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
template_spec_name: str,
template_spec_version: str,
**kwargs: Any
) -> None:
"""Deletes a specific version from a Template Spec. When operation completes, status code 200
returned without content.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param template_spec_name: Name of the Template Spec.
:type template_spec_name: str
:param template_spec_version: The version of the Template Spec.
:type template_spec_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_spec_name=template_spec_name,
template_spec_version=template_spec_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.TemplateSpecsError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Resources/templateSpecs/{templateSpecName}/versions/{templateSpecVersion}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
template_spec_name: str,
**kwargs: Any
) -> AsyncIterable["_models.TemplateSpecVersionsListResult"]:
"""Lists all the Template Spec versions in the specified Template Spec.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param template_spec_name: Name of the Template Spec.
:type template_spec_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TemplateSpecVersionsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.templatespecs.v2021_03_01_preview.models.TemplateSpecVersionsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateSpecVersionsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_spec_name=template_spec_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_spec_name=template_spec_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TemplateSpecVersionsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.TemplateSpecsError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Resources/templateSpecs/{templateSpecName}/versions'} # type: ignore
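    # Illustrative iteration over the async pager returned by `list` (the client
    # attribute name is a placeholder):
    #   async for version in client.template_spec_versions.list("my-rg", "my-spec"):
    #       print(version.name)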
|
|
# by amounra 0216 : http://www.aumhaa.com
from itertools import imap
from ableton.v2.control_surface.mode import *
from aumhaa.v2.base.debug import initialize_debug
debug = initialize_debug()
class SendSysexMode(Mode):
def __init__(self, script = None, sysex = None, *a, **k):
super(SendSysexMode, self).__init__(*a, **k)
self._send_midi = script._send_midi
self._sysex = sysex
def enter_mode(self):
self._send_midi and self._send_midi(self._sysex)
def leave_mode(self):
pass
class DisplayMessageMode(Mode):
def __init__(self, script = None, message = None, *a, **k):
super(DisplayMessageMode, self).__init__(*a, **k)
self._show_message = script.show_message
self._message = message
def enter_mode(self):
self._show_message and self._message and self._show_message(self._message)
def leave_mode(self):
pass
class SendLividSysexMode(Mode):
def __init__(self, livid_settings = None, call = None, message = None, *a, **k):
super(SendLividSysexMode, self).__init__(*a, **k)
self._send = livid_settings.send if hasattr(livid_settings, 'send') else self.fallback_send
self._call = call
self._message = message
def fallback_send(self, call = 'no call', message = 'no message', *a, **k):
debug('sysex call made to invalid livid_settings object:', call, message)
def enter_mode(self):
self._send(self._call, self._message)
def leave_mode(self):
pass
class MomentaryBehaviour(ModeButtonBehaviour):
def press_immediate(self, component, mode):
debug('momentary press')
component.push_mode(mode)
def release_immediate(self, component, mode):
debug('momentary release immediate')
if len(component.active_modes) > 1:
component.pop_mode(mode)
def release_delayed(self, component, mode):
debug('momentary release delayed')
if len(component.active_modes) > 1:
component.pop_mode(mode)
class BicoloredMomentaryBehaviour(MomentaryBehaviour):
def __init__(self, color = 'DefaultButton.On', off_color = 'DefaultButton.Off', *a, **k):
super(BicoloredMomentaryBehaviour, self).__init__(*a, **k)
self._color = color
self._off_color = off_color
def update_button(self, component, mode, selected_mode):
button = component.get_mode_button(mode)
groups = component.get_mode_groups(mode)
selected_groups = component.get_mode_groups(selected_mode)
if mode == selected_mode:
#button.set_light(self._color)
button.mode_selected_color = self._color
else:
#button.set_light(self._off_color)
button.mode_unselected_color = self._off_color
class ExcludingBehaviourMixin(ModeButtonBehaviour):
def __init__(self, excluded_groups = set(), *a, **k):
super(ExcludingBehaviourMixin, self).__init__(*a, **k)
self._excluded_groups = set(excluded_groups)
def is_excluded(self, component, selected):
return bool(component.get_mode_groups(selected) & self._excluded_groups)
def press_immediate(self, component, mode):
if not self.is_excluded(component, component.selected_mode):
super(ExcludingBehaviourMixin, self).press_immediate(component, mode)
def release_delayed(self, component, mode):
if not self.is_excluded(component, component.selected_mode):
super(ExcludingBehaviourMixin, self).release_delayed(component, mode)
def press_delayed(self, component, mode):
if not self.is_excluded(component, component.selected_mode):
super(ExcludingBehaviourMixin, self).press_delayed(component, mode)
def release_immediate(self, component, mode):
if not self.is_excluded(component, component.selected_mode):
super(ExcludingBehaviourMixin, self).release_immediate(component, mode)
def update_button(self, component, mode, selected_mode):
component.get_mode_button(mode).enabled = not self.is_excluded(component, selected_mode)
class ExcludingMomentaryBehaviour(ExcludingBehaviourMixin, MomentaryBehaviour):
def update_button(self, component, mode, selected_mode):
pass
class DelayedExcludingMomentaryBehaviour(ExcludingMomentaryBehaviour):
def press_immediate(self, component, mode):
pass
def press_delayed(self, component, mode):
if not self.is_excluded(component, component.selected_mode):
component.push_mode(mode)
class ShiftedBehaviour(ModeButtonBehaviour):
def __init__(self, color = 1, *a, **k):
super(ShiftedBehaviour, self).__init__(*a, **k)
self._color = color
self._chosen_mode = None
def press_immediate(self, component, mode):
if mode is component.selected_mode and not component.get_mode(mode+'_shifted') is None:
self._chosen_mode = mode+'_shifted'
else:
self._chosen_mode = mode
component.push_mode(self._chosen_mode)
def release_immediate(self, component, mode):
if component.selected_mode.endswith('_shifted'):
component.pop_groups(['shifted'])
elif len(component.active_modes) > 1:
component.pop_unselected_modes()
def release_delayed(self, component, mode):
component.pop_mode(self._chosen_mode)
def update_button(self, component, mode, selected_mode):
if not mode.endswith('_shifted'):
button = component.get_mode_button(mode)
groups = component.get_mode_groups(mode)
selected_groups = component.get_mode_groups(selected_mode)
#debug('--------mode:', mode, 'selected:', selected_mode, 'chosen:', self._chosen_mode)
if mode == selected_mode:
button.mode_selected_color = self._color
elif mode+'_shifted' == selected_mode:
button.mode_unselected_color = self._color+'_shifted'
else:
button.mode_unselected_color = 'DefaultButton.Disabled'
button.update()
class LatchingShiftedBehaviour(ShiftedBehaviour):
def press_immediate(self, component, mode):
if mode is component.selected_mode and component.get_mode(mode+'_shifted'):
self._chosen_mode = mode+'_shifted'
else:
self._chosen_mode = mode
component.push_mode(self._chosen_mode)
def release_immediate(self, component, mode):
if len(component.active_modes) > 1:
component.pop_unselected_modes()
def release_delayed(self, component, mode):
        # keep the shifted mode latched; only pop when the chosen mode was not
        # the shifted variant of this button's mode
        if self._chosen_mode != mode + '_shifted':
            if len(component.active_modes) > 1:
                component.pop_mode(component.selected_mode)
class CancellableBehaviour(ModeButtonBehaviour):
_previous_mode = None
def press_immediate(self, component, mode):
active_modes = component.active_modes
groups = component.get_mode_groups(mode)
can_cancel_mode = mode in active_modes or any(imap(lambda other: groups & component.get_mode_groups(other), active_modes))
if can_cancel_mode:
if groups:
component.pop_groups(groups)
else:
component.pop_mode(mode)
self.restore_previous_mode(component)
else:
self.remember_previous_mode(component)
component.push_mode(mode)
def remember_previous_mode(self, component):
self._previous_mode = component.active_modes[0] if component.active_modes else None
def restore_previous_mode(self, component):
if len(component.active_modes) == 0 and self._previous_mode is not None:
component.push_mode(self._previous_mode)
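    # In practice this behaviour gives a button toggle semantics: the first press
    # pushes the mode (remembering whatever was active), and pressing it again
    # while its mode or group is active pops it and restores the remembered mode.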
"""
class CancellableBehaviourWithRelease(CancellableBehaviour):
def release_delayed(self, component, mode):
component.pop_mode(mode)
"""
class CancellableBehaviourWithRelease(CancellableBehaviour):
def release_delayed(self, component, mode):
component.pop_mode(mode)
def update_button(self, component, mode, selected_mode):
button = component.get_mode_button(mode)
groups = component.get_mode_groups(mode)
selected_groups = component.get_mode_groups(selected_mode)
value = (mode == selected_mode or bool(groups & selected_groups))*32 or 1
#if mode == selected_mode:
# button.mode_selected_color = self.color
#elif (groups & selected_groups):
# button.mode_unselected_color = self.color
#else:
# button.mode_unselected_color = self.off_color
button.mode_selected_color = value
button.update()
class FlashingBehaviour(CancellableBehaviourWithRelease):
def __init__(self, color = 1, *a, **k):
super(FlashingBehaviour, self).__init__(*a, **k)
self._color = color
def update_button(self, component, mode, selected_mode):
button = component.get_mode_button(mode)
groups = component.get_mode_groups(mode)
selected_groups = component.get_mode_groups(selected_mode)
if mode == selected_mode or bool(groups & selected_groups):
button.send_value(self._color + 7, True)
else:
button.send_value(self._color, True)
class ColoredCancellableBehaviourWithRelease(CancellableBehaviourWithRelease):
def __init__(self, color = 'DefaultButton.On', off_color = 'DefaultButton.Off', *a, **k):
super(ColoredCancellableBehaviourWithRelease, self).__init__(*a, **k)
self._color = color
self._off_color = off_color
def update_button(self, component, mode, selected_mode):
button = component.get_mode_button(mode)
groups = component.get_mode_groups(mode)
selected_groups = component.get_mode_groups(selected_mode)
if mode == selected_mode:
#button.set_light(self._color)
button.mode_selected_color = self._color
else:
#button.set_light(self._off_color)
button.mode_unselected_color = self._off_color
button.update()
class DefaultedBehaviour(ColoredCancellableBehaviourWithRelease):
def __init__(self, default_mode = 'disabled', *a, **k):
super(DefaultedBehaviour, self).__init__(*a, **k)
self._default_mode = default_mode
def press_immediate(self, component, mode):
if mode is component.selected_mode:
mode = self._default_mode
component.push_mode(mode)
def release_immediate(self, component, mode):
if len(component.active_modes) > 1:
component.pop_unselected_modes()
def release_delayed(self, component, mode):
component.pop_mode(mode)
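# Illustrative registration with a ModesComponent (mode and component names are
# placeholders; a typical hookup might look like):
#   modes.add_mode('shift', [shift_mode], behaviour=MomentaryBehaviour())
#   modes.add_mode('session', [session_mode],
#                  behaviour=ColoredCancellableBehaviourWithRelease(color='Mode.Session.On'))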
|
|
# -*- coding: utf-8 -*-
"""
core runtime
~~~~~~~~~~~~
platform internals and logic to discover/load/inject.
:author: Sam Gammon <[email protected]>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
"""
from __future__ import print_function
# stdlib
import os
import sys
import abc
import inspect
import importlib
import threading
# core API
from .meta import Proxy
from .injection import Bridge
## Globals
__runtime__ = threading.local()
class Runtime(object):
""" Describes a structure that can manage and schedule execution for Canteen
applications. When a Canteen app is running, there is always an active
``Runtime`` object behind it.
      One ``Runtime`` is active per thread at a time. It is not possible to use
different ``Runtime`` classes concurrently (for instance, uWSGI and PyPy),
but runtimes can be composed into a compound structure that expresses
combined functionality. """
# == Public Properties == #
routes = None # compiled route map
config = None # application config
bridge = None # window into the injection pool
application = None # WSGI application callable or delegate
# == Private Properties == #
__hooks__ = {} # mapped hook points and methods to call
__owner__ = "Runtime" # meta bucket owner name for subclasses
__wrapped__ = None # wrapped dispatch method calculated on first request
__singleton__ = False # many runtimes can exist, _so power_
__metaclass__ = Proxy.Component # this should be injectable
__precedence__ = False # marked if a specific runtime should win in selection
precedence = property(lambda self: self.__precedence__) # protect writes
# == Abstract Properties == #
@staticmethod
def base_exception():
""" """
return False
@classmethod
def spawn(cls, app):
""" """
global __runtime__
if not getattr(__runtime__, 'active', None):
__runtime__.active = (cls.resolve() if cls is Runtime else cls)(app)
return __runtime__.active
@classmethod
def resolve(cls):
""" """
# @TODO(sgammon): figure out how to prioritize/select a runtime
_default, _preferred = None, []
for child in cls.iter_children():
if hasattr(child, '__default__') and child.__default__:
_default = child
continue
_preferred.append(child)
for item in _preferred:
if item.__precedence__:
return item # usually uWSGI
if _preferred:
return _preferred[0] # Werkzeug
return _default # WSGIref
@classmethod
def set_precedence(cls, status=False):
""" """
return setattr(cls, '__precendence__', status) or cls
@classmethod
def add_hook(cls, hook, context_and_func):
""" """
context, func = context_and_func
assert isinstance(hook, basestring), "hook name must be a string"
if hook not in cls.__hooks__: cls.__hooks__[hook] = []
cls.__hooks__[hook].append((context, func))
return cls
@classmethod
def get_hooks(cls, point):
""" """
if point in cls.__hooks__:
for i in cls.__hooks__[point]:
yield i
raise StopIteration()
@classmethod
def execute_hooks(cls, points, *args, **kwargs):
""" """
if isinstance(points, basestring): points = (points,)
for point in points:
for context, hook in cls.get_hooks(point):
# noinspection PyBroadException
try:
# run as classmethod
if isinstance(hook, classmethod):
hook.__func__(context, *args, **kwargs)
# run as staticmethod
elif isinstance(hook, staticmethod):
hook.__func__(*args, **kwargs)
else:
# must have a singleton if we're running in object context
if not (
hasattr(context, '__singleton__') or not context.__singleton__):
raise RuntimeError('Cannot execute hook method "%s"'
' without matching singleton context.' % hook)
# resolve singleton by context name
obj = Proxy.Component.singleton_map.get(context.__name__)
if not obj: raise RuntimeError('No matching singleton'
' for hook method "%s".' % hook)
# run in singleton context
hook(point, obj, *args, **kwargs)
except Exception:
if __debug__: raise
return
def __init__(self, app):
""" """
self.application, self.bridge = (
app,
Bridge())
def initialize(self):
""" """
self.execute_hooks('initialize', runtime=self)
def configure(self, config):
""" """
self.config = config
self.initialize() # let subclasses initialize
return self
def serve(self, interface, port, bind_only=False):
""" """
server = self.bind(interface, port)
if bind_only:
return server
try:
server.serve_forever()
except (KeyboardInterrupt, Exception):
print("Exiting.")
sys.exit(0)
def bind_environ(self, environ):
""" """
from ..logic import http
self.routes = http.HTTPSemantics.route_map.bind_to_environ(environ)
return (
http.HTTPSemantics,
http.HTTPSemantics.new_request(environ),
http.HTTPSemantics.new_response())
def handshake(self, key, origin=None):
""" WIP """
raise NotImplementedError('Runtime "%s" does not support'
' realtime dispatch semantics. ' % self)
def send(self, payload, binary=False):
""" WIP """
raise NotImplementedError('Runtime "%s" does not support'
' realtime dispatch semantics. ' % self)
def receive(self):
""" WIP """
raise NotImplementedError('Runtime "%s" does not support'
' realtime dispatch semantics. ' % self)
def dispatch(self, environ, start_response):
""" WIP """
from ..base import handler as base_handler
# setup hook context
context = {
'environ': environ,
'start_response': start_response,
'runtime': self}
# call dispatch hooks
self.execute_hooks('dispatch', **context)
# resolve URL via bound routes
http, request, response = (
context['http'],
context['request'],
context['response']
) = self.bind_environ(environ)
# call request hooks
self.execute_hooks('request', **context)
# match route
endpoint, arguments = (
context['endpoint'],
context['arguments']
) = self.routes.match()
# call match hooks
self.execute_hooks('match', **context)
# resolve endpoint
handler = context['handler'] = http.resolve_route(endpoint)
if not handler: # `None` for handler means it didn't match
# update context
context.update({
'code': 404,
'error': True,
'exception': None,
'response': None})
# dispatch error hook for 404
self.execute_hooks(('error', 'complete'), **context)
# noinspection PyCallByClass,PyTypeChecker
http.error(404)
# class-based pages/handlers
if isinstance(handler, type) and issubclass(handler, base_handler.Handler):
# initialize handler
flow = context['handler'] = handler(**context)
# call handler hooks
self.execute_hooks('handler', **context)
# dispatch time: INCEPTION.
result, iterator = flow(arguments), None
if isinstance(result, tuple) and len(result) == 2:
iterator, result = result # extract iterator and raw result
elif isinstance(result, tuple) and len(result) == 4:
status, headers, content_type, content = (
context['status'],
context['headers'],
context['content_type'],
context['content']
) = result # unpack response
_response = context['response'] = response.__class__(content, **{
'status': status,
'headers': headers,
'mimetype': content_type})
# call response hooks
self.execute_hooks(('response', 'complete'), **context)
return _response(environ, start_response)
# unpack response
status, headers, content_type, content = (
context['status'],
context['headers'],
context['content_type'],
context['content']
) = result.status, result.headers, result.content_type, result.response
# call response hooks
self.execute_hooks(('response', 'complete'), **context)
# send start_response
start_response(result.status, [(
k.encode('utf-8').strip(), v.encode('utf-8').strip()
) for k, v in result.headers])
# buffer and return (i guess) @TODO(sgammon): can we do this better?
return iterator or result.response # it's a werkzeug Response
# delegated class-based handlers (for instance, other WSGI apps)
elif isinstance(handler, type) or callable(handler):
# make a neat little shim, containing our runtime
def _foreign_runtime_bridge(status, headers):
""" """
# call response hooks
context['status'], context['headers'], context['response'] = (
status,
headers,
None)
self.execute_hooks(('response', 'complete'), **context)
return start_response(status, headers)
# attach runtime, arguments and actual start_response to shim
_foreign_runtime_bridge.runtime = self
_foreign_runtime_bridge.arguments = arguments
_foreign_runtime_bridge.start_response = start_response
context['start_response'] = _foreign_runtime_bridge
# call hooks, initialize foreign handler with replaced start_response
self.execute_hooks('handler', **context)
return handler(environ, _foreign_runtime_bridge)
# is it a function, maybe?
if inspect.isfunction(handler):
# inject stuff into context
for prop, val in (
('runtime', self),
('self', self.bridge),
('arguments', arguments),
('request', request),
('response', response),
('environ', environ),
('start_response', start_response),
('Response', response.__class__)):
handler.__globals__[prop] = val # inject all the things
# call handler hooks
self.execute_hooks('handler', **context)
# call with arguments only
result = context['response'] = handler(**arguments)
if isinstance(result, response.__class__):
# call response hooks
context['headers'], context['content'] = (
result.headers, result.response)
self.execute_hooks(('response', 'complete'), **context)
# it's a Response class - delegate to attached start_response
        return result(environ, start_response)
# a tuple bound to a URL - static response
elif isinstance(result, tuple):
if len(result) == 2: # it's (status_code, response)
status, response = (
context['status'],
context['response'],
) = result
headers = context['headers'] = [
('Content-Type', 'text/html; charset=utf-8')]
# call response hooks
self.execute_hooks(('response', 'complete'), **context)
start_response(status, headers)
return iter([response])
if len(result) == 3: # it's (status_code, headers, response)
status, headers, response = (
context['status'],
context['headers'],
context['response']
) = result
          if isinstance(headers, dict):
            if 'Content-Type' not in headers:
              headers['Content-Type'] = 'text/html; charset=utf-8'
            headers = context['headers'] = list(headers.items())
          elif 'Content-Type' not in dict(headers):
            headers = context['headers'] = (
              list(headers) + [('Content-Type', 'text/html; charset=utf-8')])
# call response hooks
self.execute_hooks(('response', 'complete'), **context)
start_response(status, headers)
return iter([response])
elif isinstance(result, basestring):
        status, headers = (
          context['status'],
          context['headers']
        ) = '200 OK', [('Content-Type', 'text/html; charset=utf-8')]
        context['response'] = result
# call response hooks
self.execute_hooks(('response', 'complete'), **context)
start_response(status, headers)
return iter([result])
# could be a bound response
if not callable(handler):
if isinstance(handler, basestring):
        context['status'], context['headers'], context['response'] = (
          '200 OK', [('Content-Type', 'text/html; charset=utf-8')], handler)
        # call response hooks
        self.execute_hooks(('response', 'complete'), **context)
        start_response(context['status'], context['headers'])
        return iter([handler])  # it's a static response!
raise RuntimeError('Unrecognized handler type: "%s".' % type(handler))
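  # Editor's note (summary of the branches above, for readability): a matched
  # handler may return a werkzeug-style Response object, a (status, response)
  # or (status, headers, response) tuple, a bare string body, or -- for
  # class-based handlers -- an (iterator, result) pair or a
  # (status, headers, content_type, content) 4-tuple.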
def wrap(self, dispatch):
""" """
if not self.__wrapped__:
# default: return dispatch directly
_dispatch = dispatch
# == development wrappers
dev_config = getattr(self.config, 'app', {}).get('dev', {})
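      # Editor's sketch (assumption, illustrative values): the development
      # block consumed below is expected to look roughly like
      #   app: {dev: {profiler: {enable: true,
      #                          dump_file: '.develop/app.profile',
      #                          profile_kwargs: {},
      #                          on_request: true}}}
      # where every key is optional and falls back to the defaults used below.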
# profiler support
if 'profiler' in dev_config:
profiler_cfg = dev_config['profiler']
if profiler_cfg.get('enable', False):
## grab a profiler
try:
import cProfile as profile
except ImportError:
import profile
## calculate dump file path
profile_path = profiler_cfg.get('dump_file', os.path.abspath(
os.path.join(*(os.getcwd(), '.develop', 'app.profile'))))
## current profile
pkwargs = profiler_cfg.get('profile_kwargs', {})
_current_profile = profile.Profile(**pkwargs)
## handle flushing mechanics
if profiler_cfg.get('on_request', True):
def maybe_flush_profile():
""" """
_current_profile.dump_stats(profile_path)
else:
# @TODO(sgammon): cross-request profiling
raise RuntimeError('Cross-request profiling'
' is currently unsupported.')
def _dispatch(*args, **kwargs):
""" Wrapper to enable profiler support. """
## dispatch
response = _current_profile.runcall(dispatch, *args, **kwargs)
maybe_flush_profile()
return response
self.__wrapped__ = _dispatch # cache locally
return self.__wrapped__
def bind(self, interface, port):
""" """
raise NotImplementedError
def callback(self, start_response):
""" """
def responder(status, headers):
""" """
return start_response(status, headers)
return responder
def __call__(self, environ, start_response):
""" """
try:
return self.wrap(self.dispatch)(environ, self.callback(start_response))
except self.base_exception as exc:
return exc(environ, start_response) # it's an acceptable exception
except Exception:
raise # just raise it k?
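# -- editor's illustrative sketch ---------------------------------------------
# A minimal, hypothetical example of the hook API above: register a
# staticmethod-style hook against a named point, then fire it. The helper names
# and payload are made up; only ``add_hook``/``execute_hooks`` come from
# ``Runtime`` itself.
def _example_hook_usage():  # pragma: no cover (example only, never called at import)
  """ Registers and fires a sample ``dispatch`` hook. """
  def _log_dispatch(*args, **kwargs):
    print('dispatch hook fired with %r %r' % (args, kwargs))
  # hooks are stored as (context, callable) pairs keyed by point name
  Runtime.add_hook('dispatch', (Runtime, staticmethod(_log_dispatch)))
  # every hook registered against 'dispatch' runs, in registration order
  Runtime.execute_hooks('dispatch', runtime=None)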
class Library(object):
""" Provides a structure that can be used to indicate (and safely handle)
external dependencies. Used extensively inside Canteen and usable by app
developers to introduce different functionality depending on the packages
available. """
name = None # string name of the library
strict = False # whether to hard-fail on ImportError
package = None # reference to the actual library package/module
exception = None # captured ImportError or AttributeError exception, if any
supported = None # boolean indicating whether this lib is supported or not
__owner__, __metaclass__ = "Library", Proxy.Component
def __init__(self, package, strict=False):
""" Initialize this ``Library`` with a target Python ``package``, and
optionally ``strict`` mode.
:param package: ``str`` path to a package that should be imported. When
``Library`` is used in a ``with`` block, the library import must be
successful to proceed in loading/processing the contents of the
block.
:param strict: ``bool`` flag to indicate that the developer wishes to
hard-fail if the given ``package`` is not available. Defaults to
``False``, meaning any ``ImportError`` encountered loading ``package``
will simply be ignored. ``True`` causes the exception to bubble to the
caller. """
if isinstance(package, basestring):
self.name = package
elif isinstance(package, type(abc)):
self.name, self.package, self.supported = package.__name__, package, True
self.strict = strict
def load(self, *subpackages):
""" Load a subpackage from an already-constructed/resolved ``Library``
object. This is usually used from the ``library`` element in a ``with``
block.
:param subpackages: Positional arguments are loaded as subpackages/
          submodules from the original ``package`` passed during construction.
For instance, ``Library('collections').load('defaultdict')`` is
essentially equivalent to ``from collections import defaultdict``.
:raises ImportError: Import issues are directly surfaced from this
method, as it is designed to be wrapped in a ``with`` block.
:returns: Loaded ``module`` object. """
loaded = []
for package in subpackages:
loaded.append(importlib.import_module('.'.join((self.name, package))))
if len(loaded) == 1:
return loaded[0] # special case: one package only (return it directly)
return tuple(loaded) # otherwise, return a tuple of loaded modules
def __enter__(self):
""" Context entrance method, responsible for triggering a load of the top-
level package and propagating exceptions if ``strict`` mode is active.
        :returns: ``tuple`` of the form ``(self, package)``, such that it can
be unpacked into ``(library, package)`` in a ``with ... as``
block. """
if not self.package and (self.supported is None):
try:
self.package = importlib.import_module(self.name)
except ImportError as e:
self.supported, self.exception = False, e
if self.strict:
raise
else:
self.supported = True
return (self, self.package)
def __exit__(self, exception_cls, exception, traceback):
""" """
if exception:
if self.strict: return False
return True
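# -- editor's illustrative sketch ---------------------------------------------
# A minimal, hypothetical use of ``Library`` as an optional-dependency guard;
# ``numpy`` is only an example package and is not required by this module.
def _example_library_usage():  # pragma: no cover (example only)
  """ Returns a small array when ``numpy`` is importable, else ``None``. """
  with Library('numpy') as (library, numpy):
    if library.supported:
      return numpy.arange(3)
  return None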
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.functions import conf_select, stack_select
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from resource_management.libraries.functions.default import default
from resource_management import *
import status_params
import utils
import os
import itertools
import re
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
stack_version = format_stack_version(stack_version_unformatted)
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = status_params.hdfs_user
root_user = "root"
hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
secure_dn_ports_are_in_use = False
#hadoop params
mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hadoop_home = "/usr/iop/current/hadoop-client"
if not security_enabled:
hadoop_secure_dn_user = '""'
else:
dfs_dn_port = utils.get_port(dfs_dn_addr)
dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
  # Determine whether the datanode would have to bind root-owned (privileged) ports, which prevents starting it as a plain user
if dfs_http_policy == "HTTPS_ONLY":
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
elif dfs_http_policy == "HTTP_AND_HTTPS":
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
if secure_dn_ports_are_in_use:
hadoop_secure_dn_user = hdfs_user
else:
hadoop_secure_dn_user = '""'
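# Editor's note (illustrative): e.g. a dfs.datanode.address ending in :1019 under
# HTTP_ONLY is a privileged port, so secure_dn_ports_are_in_use becomes True and
# hadoop_secure_dn_user is set to hdfs_user (the datanode starts via the secure path).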
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
limits_conf_dir = "/etc/security/limits.d"
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
ambari_libs_dir = "/var/lib/ambari-agent/lib"
#snappy
create_lib_snappy_symlinks = False
snappy_so = "libsnappy.so"
so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
so_src_dir_x86 = format("{hadoop_home}/lib")
so_src_dir_x64 = format("{hadoop_home}/lib/native")
so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited ; "
#security params
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
klist_path_local = functions.get_klist_path()
kinit_path_local = functions.get_kinit_path()
#hosts
hostname = config["hostname"]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
nm_host = default("/clusterHostInfo/nm_host", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_namenodes = not len(namenode_host) == 0
has_jobtracker = not len(jtnode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_histroryserver = not len(hs_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_journalnode_hosts = not len(journalnode_hosts) == 0
has_zkfc_hosts = not len(zkfc_hosts) == 0
has_falcon_host = not len(falcon_host) == 0
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
#users and groups
yarn_user = config['configurations']['yarn-env']['yarn_user']
hbase_user = config['configurations']['hbase-env']['hbase_user']
oozie_user = config['configurations']['oozie-env']['oozie_user']
webhcat_user = config['configurations']['hive-env']['hcat_user']
hcat_user = config['configurations']['hive-env']['hcat_user']
hive_user = config['configurations']['hive-env']['hive_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
user_group = config['configurations']['cluster-env']['user_group']
root_group = "root"
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
namenode_dirs_stub_filename = "namenode_dirs_created"
smoke_hdfs_user_dir = format("/user/{smoke_user}")
smoke_hdfs_user_mode = 0770
hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted",
format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
"/var/lib/hdfs/namenode/formatted"]
dfs_name_dirs = dfs_name_dir.split(",")
namenode_formatted_mark_dirs = []
for dn_dir in dfs_name_dirs:
tmp_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
namenode_formatted_mark_dirs.append(tmp_mark_dir)
# Use the namenode RPC address if configured, otherwise, fallback to the default file system
namenode_address = None
if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
namenode_address = format("hdfs://{namenode_rpcaddress}")
else:
namenode_address = config['configurations']['core-site']['fs.defaultFS']
fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
dfs_data_dirs = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
# hostname of the active HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None
if dfs_ha_namenode_ids:
dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
if dfs_ha_namenode_ids_array_len > 1:
dfs_ha_enabled = True
if dfs_ha_enabled:
for nn_id in dfs_ha_namemodes_ids_list:
nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
if hostname in nn_host:
namenode_id = nn_id
namenode_rpc = nn_host
if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
https_only = True
journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
else:
https_only = False
journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
if journalnode_address:
journalnode_port = journalnode_address.split(":")[1]
if security_enabled:
_dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
_dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
_dn_principal_name = _dn_principal_name.replace('_HOST',hostname.lower())
dn_kinit_cmd = format("{kinit_path_local} -kt {_dn_keytab} {_dn_principal_name};")
_nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
_nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
_nn_principal_name = _nn_principal_name.replace('_HOST',hostname.lower())
nn_kinit_cmd = format("{kinit_path_local} -kt {_nn_keytab} {_nn_principal_name};")
_jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
if _jn_principal_name:
_jn_principal_name = _jn_principal_name.replace('_HOST', hostname.lower())
_jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
jn_kinit_cmd = format("{kinit_path_local} -kt {_jn_keytab} {_jn_principal_name};")
else:
dn_kinit_cmd = ""
nn_kinit_cmd = ""
jn_kinit_cmd = ""
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
import functools
#create a partial with common arguments for every HdfsResource call
#to manage an hdfs path, call params.HdfsResource in service code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs
)
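# Editor's sketch (assumed Ambari resource usage; paths and modes are illustrative):
# with the partial above, service scripts can declare HDFS paths without repeating
# credentials, e.g.
#   params.HdfsResource(smoke_hdfs_user_dir,
#                       type="directory",
#                       action="create_on_execute",
#                       owner=smoke_user,
#                       mode=smoke_hdfs_user_mode)
#   params.HdfsResource(None, action="execute")  # flush the pending operations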
io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
if not "com.hadoop.compression.lzo" in io_compression_codecs:
exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
else:
exclude_packages = []
name_node_params = default("/commandParams/namenode", None)
#hadoop params
hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
|
|
"""Zookeeper Serializers, Deserializers, and NamedTuple objects"""
from collections import namedtuple
import struct
from kazoo.exceptions import EXCEPTIONS
from kazoo.protocol.states import ZnodeStat
from kazoo.security import ACL
from kazoo.security import Id
import six
# Struct objects with formats compiled
bool_struct = struct.Struct('B')
int_struct = struct.Struct('!i')
long_struct = struct.Struct('!q')
int_int_struct = struct.Struct('!ii')
int_int_long_struct = struct.Struct('!iiq')
int_long_int_long_struct = struct.Struct('!iqiq')
multiheader_struct = struct.Struct('!iBi')
reply_header_struct = struct.Struct('!iqi')
stat_struct = struct.Struct('!qqqqiiiqiiq')
def read_string(buffer, offset):
"""Reads an int specified buffer into a string and returns the
string and the new offset in the buffer"""
length = int_struct.unpack_from(buffer, offset)[0]
offset += int_struct.size
if length < 0:
return None, offset
else:
index = offset
offset += length
return buffer[index:index + length].decode('utf-8'), offset
def read_acl(bytes, offset):
perms = int_struct.unpack_from(bytes, offset)[0]
offset += int_struct.size
scheme, offset = read_string(bytes, offset)
id, offset = read_string(bytes, offset)
return ACL(perms, Id(scheme, id)), offset
def write_string(bytes):
if not bytes:
return int_struct.pack(-1)
else:
utf8_str = bytes.encode('utf-8')
return int_struct.pack(len(utf8_str)) + utf8_str
def write_buffer(bytes):
if bytes is None:
return int_struct.pack(-1)
else:
return int_struct.pack(len(bytes)) + bytes
def read_buffer(bytes, offset):
length = int_struct.unpack_from(bytes, offset)[0]
offset += int_struct.size
if length < 0:
return None, offset
else:
index = offset
offset += length
return bytes[index:index + length], offset
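# -- editor's illustrative sketch ----------------------------------------------
# Round-trips a value through the length-prefixed string format used above: a
# signed big-endian int length followed by the UTF-8 payload (-1 means None).
# This helper is an editor's addition for illustration, not part of kazoo.
def _example_string_roundtrip(value=u'znode-name'):
    packed = write_string(value)              # 4-byte length + encoded payload
    decoded, new_offset = read_string(packed, 0)
    assert decoded == value and new_offset == len(packed)
    return decoded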
class Close(namedtuple('Close', '')):
type = -11
@classmethod
def serialize(cls):
return b''
CloseInstance = Close()
class Ping(namedtuple('Ping', '')):
type = 11
@classmethod
def serialize(cls):
return b''
PingInstance = Ping()
class Connect(namedtuple('Connect', 'protocol_version last_zxid_seen'
' time_out session_id passwd read_only')):
type = None
def serialize(self):
b = bytearray()
b.extend(int_long_int_long_struct.pack(
self.protocol_version, self.last_zxid_seen, self.time_out,
self.session_id))
b.extend(write_buffer(self.passwd))
b.extend([1 if self.read_only else 0])
return b
@classmethod
def deserialize(cls, bytes, offset):
proto_version, timeout, session_id = int_int_long_struct.unpack_from(
bytes, offset)
offset += int_int_long_struct.size
password, offset = read_buffer(bytes, offset)
try:
            read_only = bool_struct.unpack_from(bytes, offset)[0] == 1
offset += bool_struct.size
except struct.error:
read_only = False
return cls(proto_version, 0, timeout, session_id, password,
read_only), offset
class Create(namedtuple('Create', 'path data acl flags')):
type = 1
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(write_buffer(self.data))
b.extend(int_struct.pack(len(self.acl)))
for acl in self.acl:
b.extend(int_struct.pack(acl.perms) +
write_string(acl.id.scheme) + write_string(acl.id.id))
b.extend(int_struct.pack(self.flags))
return b
@classmethod
def deserialize(cls, bytes, offset):
return read_string(bytes, offset)[0]
class Delete(namedtuple('Delete', 'path version')):
type = 2
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(int_struct.pack(self.version))
return b
@classmethod
def deserialize(self, bytes, offset):
return True
class Exists(namedtuple('Exists', 'path watcher')):
type = 3
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend([1 if self.watcher else 0])
return b
@classmethod
def deserialize(cls, bytes, offset):
stat = ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
return stat if stat.czxid != -1 else None
class GetData(namedtuple('GetData', 'path watcher')):
type = 4
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend([1 if self.watcher else 0])
return b
@classmethod
def deserialize(cls, bytes, offset):
data, offset = read_buffer(bytes, offset)
stat = ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
return data, stat
class SetData(namedtuple('SetData', 'path data version')):
type = 5
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(write_buffer(self.data))
b.extend(int_struct.pack(self.version))
return b
@classmethod
def deserialize(cls, bytes, offset):
return ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
class GetACL(namedtuple('GetACL', 'path')):
type = 6
def serialize(self):
return bytearray(write_string(self.path))
@classmethod
def deserialize(cls, bytes, offset):
count = int_struct.unpack_from(bytes, offset)[0]
offset += int_struct.size
if count == -1: # pragma: nocover
return []
acls = []
for c in range(count):
acl, offset = read_acl(bytes, offset)
acls.append(acl)
stat = ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
return acls, stat
class SetACL(namedtuple('SetACL', 'path acls version')):
type = 7
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(int_struct.pack(len(self.acls)))
for acl in self.acls:
b.extend(int_struct.pack(acl.perms) +
write_string(acl.id.scheme) + write_string(acl.id.id))
b.extend(int_struct.pack(self.version))
return b
@classmethod
def deserialize(cls, bytes, offset):
return ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
class GetChildren(namedtuple('GetChildren', 'path watcher')):
type = 8
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend([1 if self.watcher else 0])
return b
@classmethod
def deserialize(cls, bytes, offset):
count = int_struct.unpack_from(bytes, offset)[0]
offset += int_struct.size
if count == -1: # pragma: nocover
return []
children = []
for c in range(count):
child, offset = read_string(bytes, offset)
children.append(child)
return children
class Sync(namedtuple('Sync', 'path')):
type = 9
def serialize(self):
return write_string(self.path)
@classmethod
def deserialize(cls, buffer, offset):
return read_string(buffer, offset)[0]
class GetChildren2(namedtuple('GetChildren2', 'path watcher')):
type = 12
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend([1 if self.watcher else 0])
return b
@classmethod
def deserialize(cls, bytes, offset):
count = int_struct.unpack_from(bytes, offset)[0]
offset += int_struct.size
if count == -1: # pragma: nocover
return []
children = []
for c in range(count):
child, offset = read_string(bytes, offset)
children.append(child)
stat = ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
return children, stat
class CheckVersion(namedtuple('CheckVersion', 'path version')):
type = 13
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(int_struct.pack(self.version))
return b
class Transaction(namedtuple('Transaction', 'operations')):
type = 14
def serialize(self):
b = bytearray()
for op in self.operations:
b.extend(MultiHeader(op.type, False, -1).serialize() +
op.serialize())
return b + multiheader_struct.pack(-1, True, -1)
@classmethod
def deserialize(cls, bytes, offset):
header = MultiHeader(None, False, None)
results = []
response = None
while not header.done:
if header.type == Create.type:
response, offset = read_string(bytes, offset)
elif header.type == Delete.type:
response = True
elif header.type == SetData.type:
response = ZnodeStat._make(
stat_struct.unpack_from(bytes, offset))
offset += stat_struct.size
elif header.type == CheckVersion.type:
response = True
elif header.type == -1:
err = int_struct.unpack_from(bytes, offset)[0]
offset += int_struct.size
response = EXCEPTIONS[err]()
if response:
results.append(response)
header, offset = MultiHeader.deserialize(bytes, offset)
return results
@staticmethod
def unchroot(client, response):
resp = []
for result in response:
if isinstance(result, six.string_types):
resp.append(client.unchroot(result))
else:
resp.append(result)
return resp
class Reconfig(namedtuple('Reconfig', 'joining leaving new_members config_id')):
type = 16
def serialize(self):
b = bytearray()
b.extend(write_string(self.joining))
b.extend(write_string(self.leaving))
b.extend(write_string(self.new_members))
b.extend(long_struct.pack(self.config_id))
return b
@classmethod
def deserialize(cls, bytes, offset):
data, offset = read_buffer(bytes, offset)
stat = ZnodeStat._make(stat_struct.unpack_from(bytes, offset))
return data, stat
class Auth(namedtuple('Auth', 'auth_type scheme auth')):
type = 100
def serialize(self):
return (int_struct.pack(self.auth_type) + write_string(self.scheme) +
write_string(self.auth))
class Watch(namedtuple('Watch', 'type state path')):
@classmethod
def deserialize(cls, bytes, offset):
"""Given bytes and the current bytes offset, return the
type, state, path, and new offset"""
type, state = int_int_struct.unpack_from(bytes, offset)
offset += int_int_struct.size
path, offset = read_string(bytes, offset)
return cls(type, state, path), offset
class ReplyHeader(namedtuple('ReplyHeader', 'xid, zxid, err')):
@classmethod
def deserialize(cls, bytes, offset):
"""Given bytes and the current bytes offset, return a
:class:`ReplyHeader` instance and the new offset"""
new_offset = offset + reply_header_struct.size
return cls._make(
reply_header_struct.unpack_from(bytes, offset)), new_offset
class MultiHeader(namedtuple('MultiHeader', 'type done err')):
def serialize(self):
b = bytearray()
b.extend(int_struct.pack(self.type))
b.extend([1 if self.done else 0])
b.extend(int_struct.pack(self.err))
return b
@classmethod
def deserialize(cls, bytes, offset):
t, done, err = multiheader_struct.unpack_from(bytes, offset)
offset += multiheader_struct.size
        return cls(t, done == 1, err), offset
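# -- editor's illustrative sketch ----------------------------------------------
# Shows the multi-request framing implemented above: each operation is prefixed
# with a MultiHeader(type, done=False, err=-1) and the batch is terminated by a
# (-1, True, -1) header. The paths/values here are illustrative only.
def _example_transaction_payload():
    ops = [CheckVersion(u'/node', 0), Create(u'/node/child', b'payload', [], 0)]
    return bytes(Transaction(ops).serialize())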
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import threading
import time
from six.moves import zip_longest
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class ParallelInterleaveDatasetTest(test.TestCase):
def setUp(self):
self.input_values = array_ops.placeholder(dtypes.int64, shape=[None])
self.cycle_length = array_ops.placeholder(dtypes.int64, shape=[])
self.block_length = array_ops.placeholder(dtypes.int64, shape=[])
self.sloppy = array_ops.placeholder(dtypes.bool, shape=[])
self.buffer_output_elements = array_ops.placeholder(dtypes.int64, shape=[])
self.prefetch_input_elements = array_ops.placeholder(dtypes.int64, shape=[])
self.error = None
self.repeat_count = 2
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
self.read_coordination_events = {}
self.write_coordination_events = {}
# input values [4, 5, 6] are the common case for the tests; set defaults
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i] = threading.Event()
def map_py_fn(x):
self.write_coordination_events[x].wait()
self.write_coordination_events[x].clear()
self.read_coordination_events[x].release()
if self.error:
err = self.error
self.error = None
raise err # pylint: disable=raising-bad-type
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
self.dataset = (
dataset_ops.Dataset.from_tensor_slices(self.input_values)
.repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(interleave_fn, self.cycle_length,
self.block_length, self.sloppy,
self.buffer_output_elements,
self.prefetch_input_elements)))
self.iterator = self.dataset.make_initializable_iterator()
self.init_op = self.iterator.initializer
self.next_element = self.iterator.get_next()
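    # Editor's note (illustrative): for input_values=[4, 5, 6] this pipeline
    # yields the squares 16, 25 and 36, repeated 4, 5 and 6 times respectively
    # (twice over, via repeat_count), interleaved according to cycle_length and
    # block_length and gated by the coordination events above.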
def _interleave(self, lists, cycle_length, block_length):
"""Python implementation of interleave used for testing."""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
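  # Editor's note (illustrative): for example,
  #   list(self._interleave([[1, 1], [2, 2, 2]], cycle_length=2, block_length=1))
  # yields [1, 2, 1, 2, 2]: elements are taken round-robin from the open
  # iterators, one block at a time, and a slot is refilled only after its
  # iterator is exhausted.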
def testPythonImplementation(self):
input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]
# Cycle length 1 acts like `Dataset.flat_map()`.
expected_elements = itertools.chain(*input_lists)
for expected, produced in zip(expected_elements,
self._interleave(input_lists, 1, 1)):
self.assertEqual(expected, produced)
# Cycle length > 1.
expected_elements = [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5,
6, 5, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationBlockLength(self):
input_lists = [[4] * 4, [5] * 5, [6] * 6] * 2
expected_elements = [
4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6, 5, 5, 6, 6, 5,
5, 6, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 2))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationEmptyLists(self):
input_lists = [[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [],
[6, 6, 6, 6, 6, 6]]
expected_elements = [
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def _clear_coordination_events(self):
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i].clear()
def _allow_all_map_threads(self):
for i in range(4, 7):
self.write_coordination_events[i].set()
def _testSingleThreaded(self, sloppy=False, prefetch_input_elements=0):
# cycle_length=1,block_length=1 acts like `Dataset.interleave()` and
# `Dataset.flat_map()` and is single-threaded. No synchronization required.
with self.cached_session() as sess:
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 1,
self.block_length: 1,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: prefetch_input_elements,
})
for expected_element in self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1):
self.write_coordination_events[expected_element].set()
self.assertEqual(expected_element * expected_element,
sess.run(self.next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testSingleThreaded(self):
self._testSingleThreaded()
def testSingleThreadedSloppy(self):
self._testSingleThreaded(sloppy=True)
def testSingleThreadedPrefetch1Itr(self):
self._testSingleThreaded(prefetch_input_elements=1)
def testSingleThreadedPrefetch1ItrSloppy(self):
self._testSingleThreaded(prefetch_input_elements=1, sloppy=True)
def testSingleThreadedRagged(self):
# Tests a sequence with wildly different elements per iterator.
with self.cached_session() as sess:
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [3, 7, 4],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: False,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 1,
})
# Add coordination values for 3 and 7
self.read_coordination_events[3] = threading.Semaphore(0)
self.write_coordination_events[3] = threading.Event()
self.read_coordination_events[7] = threading.Semaphore(0)
self.write_coordination_events[7] = threading.Event()
for expected_element in self._interleave(
[[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1):
self.write_coordination_events[expected_element].set()
output = sess.run(self.next_element)
self.assertEqual(expected_element * expected_element, output)
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def _testTwoThreadsNoContention(self, sloppy=False):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
with self.cached_session() as sess:
self._clear_coordination_events()
done_first_event = False
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 1,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = sess.run(self.next_element)
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testTwoThreadsNoContention(self):
self._testTwoThreadsNoContention()
def testTwoThreadsNoContentionSloppy(self):
self._testTwoThreadsNoContention(sloppy=True)
def _testTwoThreadsNoContentionWithRaces(self, sloppy=False):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
with self.cached_session() as sess:
self._clear_coordination_events()
done_first_event = False
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 1,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = sess.run(self.next_element)
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testTwoThreadsNoContentionWithRaces(self):
self._testTwoThreadsNoContentionWithRaces()
def testTwoThreadsNoContentionWithRacesSloppy(self):
self._testTwoThreadsNoContentionWithRaces(sloppy=True)
def _testTwoThreadsNoContentionBlockLength(self, sloppy=False):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
with self.cached_session() as sess:
self._clear_coordination_events()
done_first_event = False
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 2,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 1,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = sess.run(self.next_element)
if not done_first_event:
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testTwoThreadsNoContentionBlockLength(self):
self._testTwoThreadsNoContentionBlockLength()
def testTwoThreadsNoContentionBlockLengthSloppy(self):
self._testTwoThreadsNoContentionBlockLength(sloppy=True)
def _testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy=False):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
with self.cached_session() as sess:
self._clear_coordination_events()
done_first_event = False
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 2,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 1,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = sess.run(self.next_element)
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testTwoThreadsNoContentionWithRacesAndBlocking(self):
self._testTwoThreadsNoContentionWithRacesAndBlocking()
def testTwoThreadsNoContentionWithRacesAndBlockingSloppy(self):
self._testTwoThreadsNoContentionWithRacesAndBlocking(sloppy=True)
def _testEmptyInput(self, sloppy=False):
with self.cached_session() as sess:
# Empty input.
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [],
self.cycle_length: 2,
self.block_length: 3,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testEmptyInput(self):
self._testEmptyInput()
def testEmptyInputSloppy(self):
self._testEmptyInput(sloppy=True)
def _testNonEmptyInputIntoEmptyOutputs(self, sloppy=False):
# Non-empty input leading to empty output.
with self.cached_session() as sess:
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [0, 0, 0],
self.cycle_length: 2,
self.block_length: 3,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testNonEmptyInputIntoEmptyOutputs(self):
self._testNonEmptyInputIntoEmptyOutputs()
def testNonEmptyInputIntoEmptyOutputsSloppy(self):
self._testNonEmptyInputIntoEmptyOutputs(sloppy=True)
def _testPartiallyEmptyOutputs(self, sloppy=False, prefetch_input_elements=1):
race_indices = {2, 8, 14} # Sequence points when sloppy mode has race conds
# Mixture of non-empty and empty interleaved datasets.
with self.cached_session() as sess:
self._clear_coordination_events()
done_first_event = False
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 0, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: prefetch_input_elements,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)):
self.write_coordination_events[expected_element].set()
# First event starts the worker threads. Additionally, when running the
# sloppy case with prefetch_input_elements=0, we get stuck if we wait
# for the read coordination event for certain event orderings in the
# presence of finishing iterators.
if done_first_event and not (sloppy and (i in race_indices)):
self.read_coordination_events[expected_element].acquire()
actual_element = sess.run(self.next_element)
if not done_first_event or (sloppy and (i in race_indices)):
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
def testPartiallyEmptyOutputs(self):
self._testPartiallyEmptyOutputs()
def testPartiallyEmptyOutputsSloppy(self):
self._testPartiallyEmptyOutputs(sloppy=True, prefetch_input_elements=0)
def testDelayedOutputSloppy(self):
# Explicitly control the sequence of events to ensure we correctly avoid
# head-of-line blocking.
with self.cached_session() as sess:
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: True,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
mis_ordering = [
4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6,
6, 5, 5, 5, 5, 6, 6
]
for element in mis_ordering:
self.write_coordination_events[element].set()
self.assertEqual(element * element, sess.run(self.next_element))
self.assertTrue(self.read_coordination_events[element].acquire(False))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testBlockLengthWithContentionSloppy(self):
with self.cached_session() as sess:
self._clear_coordination_events()
done_first_event = False
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: True,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 1,
})
# Test against a generating sequence that differs from the uncontended
# case, in order to prove sloppy correctness.
for i, expected_element in enumerate(
self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count,
cycle_length=2,
block_length=3)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = sess.run(self.next_element)
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def _testEarlyExit(self, sloppy=False):
# Exiting without consuming all input should not block
with self.cached_session() as sess:
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 3,
self.block_length: 2,
self.sloppy: sloppy,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
for i in range(4, 7):
self.write_coordination_events[i].set()
elem = sess.run(self.next_element) # Start all workers
# Allow the one successful worker to progress beyond the py_func again.
elem = int(math.sqrt(elem))
self.write_coordination_events[elem].set()
self.read_coordination_events[elem].acquire()
# Allow the prefetch to succeed
for i in range(4, 7):
self.read_coordination_events[i].acquire()
self.write_coordination_events[i].set()
def testEarlyExit(self):
self._testEarlyExit()
def testEarlyExitSloppy(self):
self._testEarlyExit(sloppy=True)
def _testTooManyReaders(self, sloppy=False):
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64))
return dataset
dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6])
dataset = dataset.repeat(self.repeat_count)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy))
iterator = dataset.make_one_shot_iterator()
with self.cached_session() as sess:
output_values = []
for _ in range(30):
output_values.append(sess.run(iterator.get_next()))
expected_values = self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2)
self.assertItemsEqual(output_values, expected_values)
def testTooManyReaders(self):
self._testTooManyReaders()
def testTooManyReadersSloppy(self):
self._testTooManyReaders(sloppy=True)
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
iterator = dataset.apply(
interleave_ops.parallel_interleave(
_interleave_fn, cycle_length=1)).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testErrorsInOutputFn(self):
with self.cached_session() as sess:
self._clear_coordination_events()
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: False,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
except_on_element_indices = set([3])
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if i in except_on_element_indices:
self.error = ValueError()
self.write_coordination_events[expected_element].set()
with self.assertRaises(errors.InvalidArgumentError):
sess.run(self.next_element)
else:
self.write_coordination_events[expected_element].set()
actual_element = sess.run(self.next_element)
self.assertEqual(expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testErrorsInInputFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset
self.dataset = (
dataset_ops.Dataset.from_tensor_slices(self.input_values).map(map_fn)
.repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(interleave_fn, self.cycle_length,
self.block_length, self.sloppy,
self.buffer_output_elements,
self.prefetch_input_elements)))
self.iterator = self.dataset.make_initializable_iterator()
self.init_op = self.iterator.initializer
self.next_element = self.iterator.get_next()
with self.cached_session() as sess:
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: False,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(self.next_element)
else:
actual_element = sess.run(self.next_element)
self.assertEqual(expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testErrorsInInterleaveFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
y = script_ops.py_func(map_py_fn, [x], x.dtype)
dataset = dataset.repeat(y)
return dataset
self.dataset = (
dataset_ops.Dataset.from_tensor_slices(self.input_values)
.repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(interleave_fn, self.cycle_length,
self.block_length, self.sloppy,
self.buffer_output_elements,
self.prefetch_input_elements)))
self.iterator = self.dataset.make_initializable_iterator()
self.init_op = self.iterator.initializer
self.next_element = self.iterator.get_next()
with self.cached_session() as sess:
sess.run(
self.init_op,
feed_dict={
self.input_values: [4, 5, 6],
self.cycle_length: 2,
self.block_length: 1,
self.sloppy: False,
self.buffer_output_elements: 1,
self.prefetch_input_elements: 0,
})
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(self.next_element)
else:
actual_element = sess.run(self.next_element)
self.assertEqual(expected_element, actual_element,
"At index %s: %s expected, got: %s" %
(i, expected_element, actual_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.next_element)
def testShutdownRace(self):
dataset = dataset_ops.Dataset.range(20)
map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1))
dataset = dataset.apply(
interleave_ops.parallel_interleave(
map_fn,
cycle_length=3,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
dataset = dataset.batch(32)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
results = []
with self.cached_session() as sess:
for _ in range(2):
elements = []
sess.run(iterator.initializer)
try:
while True:
elements.extend(sess.run(next_element))
except errors.OutOfRangeError:
pass
results.append(elements)
self.assertAllEqual(results[0], results[1])
if __name__ == "__main__":
test.main()
|
|
from csv2_common import check_keys, requests, show_active_user_groups, show_table
from subprocess import Popen, PIPE
import os
# ----------------------------------------------------------------------------------------------------------------------
def get_form_data_and_update_count(gvar, mandatory, required, optional, key_map={}, query_keys=[]):
form_data = check_keys(
gvar,
mandatory,
required,
optional,
key_map=key_map)
query_count = 0
for key in query_keys:
if key in form_data:
query_count += 1
return form_data, len(form_data)-query_count
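# Illustrative note (not part of the original source): check_keys() maps the
# command-line flags onto canonical form-data keys, and the second value
# returned above counts only the keys that act as updates rather than query
# selectors. For example, assuming '-cn' maps to 'cloud_name' and '-ia' to
# 'architectures', a call with both flags set and query_keys=['cloud_name']
# would return something like ({'cloud_name': ..., 'architectures': ...}, 1).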
# ----------------------------------------------------------------------------------------------------------------------
def images(gvar):
"""
List EC2 images for the specified cloud.
"""
mandatory = ['-cn']
required = []
optional = ['-CSEP', '-CSV', '-g', '-H', '-h', '-ia', '-il', '-inl', '-ioa', '-ioi', '-ios', '-NV', '-ok', '-r', '-s', '-V', '-VC', '-v', '-v', '-x509', '-xA', '-w']
if gvar['retrieve_options']:
return mandatory + required + optional
key_map = {
'-cn': 'cloud_name',
'-ia': 'architectures',
'-il': 'like',
'-inl': 'not_like',
'-ioa': 'owner_aliases',
'-ioi': 'owner_ids',
'-ios': 'operating_systems',
}
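    # Illustrative example (assumed flag usage, not from the source): invoking
    # the images command with '-cn mycloud -ia x86_64' would produce roughly
    # form_data = {'cloud_name': 'mycloud', 'architectures': 'x86_64'}, of
    # which only 'architectures' counts as a filter update below.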
# Check for missing arguments or help required.
form_data, updates = get_form_data_and_update_count(
gvar,
mandatory,
required,
optional,
key_map=key_map,
query_keys=['cloud_name'])
# Retrieve data (possibly after changing the filters).
if updates > 0:
response = requests(
gvar,
'/ec2/images/',
form_data
)
else:
response = requests(gvar, '/ec2/images/', query_data={'cloud_name': gvar['user_settings']['cloud-name']})
if response['message']:
print(response['message'])
# Print report.
show_active_user_groups(gvar, response)
show_table(
gvar,
response['ec2_image_filters'],
[
'group_name/Group,k',
'cloud_name/Cloud,k',
'owner_aliases/Aliases/Owner',
'owner_ids/IDs/Owner',
'like/Like/Images',
'not_like/Not Like/Images',
'operating_systems/Operating Systems',
'architectures/Architectures',
],
title="EC2 Image Filters",
)
show_table(
gvar,
response['architectures'],
[
'architecture/Architecture',
],
title="Architecture Filter",
optional=True,
)
show_table(
gvar,
response['operating_systems'],
[
'operating_system/Operating System',
],
title="Operating System Filter",
optional=True,
)
show_table(
gvar,
response['owner_aliases'],
[
'alias/Alias',
],
title="Owner Alias Filter",
optional=True,
)
show_table(
gvar,
response['ec2_images'],
[
'region/Region,k',
'image_location/Location',
'id/ID',
'owner_alias/Alias/Owner',
'owner_id/ID/Owner',
'borrower_id/Borrower ID',
'opsys/Operating System',
'arch/Architecture',
            'disk_format/Disk Format',
'size/Size',
'visibility/Visibility',
'last_updated/Last Updated',
],
title="EC2 Images",
)
# ----------------------------------------------------------------------------------------------------------------------
def instance_types(gvar):
"""
List EC2 instance types for the specified cloud.
"""
mandatory = ['-cn']
required = []
optional = ['-CSEP', '-CSV', '-g', '-H', '-h', '-itc', '-itf', '-itmn', '-itmx', '-itos', '-itp', '-itpm', '-NV', '-ok', '-r', '-s', '-V', '-VC', '-v', '-v', '-x509', '-xA', '-w']
if gvar['retrieve_options']:
return mandatory + required + optional
key_map = {
'-cn': 'cloud_name',
'-itc': 'cores',
'-itf': 'families',
'-itmn': 'memory_min_gigabytes_per_core',
'-itmx': 'memory_max_gigabytes_per_core',
'-itos': 'operating_systems',
'-itp': 'processors',
'-itpm': 'processor_manufacturers',
}
# Check for missing arguments or help required.
form_data, updates = get_form_data_and_update_count(
gvar,
mandatory,
required,
optional,
key_map=key_map,
query_keys=['cloud_name'])
# Retrieve data (possibly after changing the filters).
if updates > 0:
response = requests(
gvar,
'/ec2/instance-types/',
form_data
)
else:
response = requests(gvar, '/ec2/instance-types/', query_data={'cloud_name': gvar['user_settings']['cloud-name']})
if response['message']:
print(response['message'])
# Print report.
show_active_user_groups(gvar, response)
show_table(
gvar,
response['ec2_instance_type_filters'],
[
'group_name/Group,k',
'cloud_name/Cloud,k',
'families/Families',
            'operating_systems/Operating Systems',
'processors/Processors',
'processor_manufacturers/Processor Manufacturers',
'cores/Cores',
'memory_min_gigabytes_per_core/Min/Memory (GiB per core)',
'memory_max_gigabytes_per_core/Max/Memory (GiB per core)',
# 'owner_aliases/Aliases/Owner',
# 'owner_ids/IDs/Owner',
# 'like/Like/Images',
# 'not_like/Not Like/Images',
# 'operating_systems/Operating Systems',
# 'architectures/Architectures',
],
title="EC2 Instance Type Filters",
)
show_table(
gvar,
response['families'],
[
'instance_family/Family',
],
title="Family Filter",
optional=True,
)
show_table(
gvar,
response['operating_systems'],
[
'operating_system/Operating System',
],
title="Operating System Filter",
optional=True,
)
show_table(
gvar,
response['processors'],
[
'processor/Processor',
],
title="Processor Filter",
optional=True,
)
show_table(
gvar,
response['manufacturers'],
[
'processor_manufacturer/Manufacturer',
],
title="Manufacturer Filter",
optional=True,
)
show_table(
gvar,
response['cores'],
[
'cores/Cores',
],
title="Cores Filter",
optional=True,
)
show_table(
gvar,
response['ec2_instance_types'],
[
'region/Region,k',
'instance_type/Instance Type',
'operating_system/Operating System',
'instance_family/Family',
'processor/Processor',
'processor_manufacturer/Manufacturer',
'cores/Cores',
'memory/Memory',
'memory_per_core/Memory per Core',
'storage/Storage',
'cost_per_hour/Cost per Hour',
],
title="EC2 Instance Types",
)
|
|
#!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
sections), matches the symbols in the orderfile and augments each symbol with
the symbols residing at the same address (due to having identical code). The
output is a list of section matching rules appropriate for the linker option
-section-ordering-file. These section matching rules include both actual
section names and names with wildcard (*) suffixes.
Note: It is possible to have:
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." and other
suffixes)
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile, with several different prefixes
and suffixes
6. Output catch-all section matching rules for unprofiled methods.
"""
import collections
import logging
import optparse
import sys
import cyglog_to_orderfile
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
_PREFIXES = ('.text.startup.', '.text.hot.', '.text.unlikely.', '.text.')
# Suffixes for the symbols. These are due to method splitting for inlining and
# method cloning for various reasons including constant propagation and
# inter-procedural optimization.
_SUFFIXES = ('.clone.', '.part.', '.isra.', '.constprop.')
def RemoveSuffixes(name):
"""Strips method name suffixes from cloning and splitting.
.clone. comes from cloning in -O3.
.part. comes from partial method splitting for inlining.
.isra. comes from inter-procedural optimizations.
.constprop. is cloning for constant propagation.
"""
for suffix in _SUFFIXES:
name = name.split(suffix)[0]
return name
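# Example (illustrative): RemoveSuffixes('Foo.isra.2.constprop.10') -> 'Foo',
# since everything after the first recognized suffix is discarded.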
def _UniqueGenerator(generator):
"""Converts a generator to skip yielding elements already seen.
Example:
@_UniqueGenerator
def Foo():
yield 1
yield 2
yield 1
yield 3
Foo() yields 1,2,3.
"""
def _FilteringFunction(*args, **kwargs):
returned = set()
for item in generator(*args, **kwargs):
if item in returned:
continue
returned.add(item)
yield item
return _FilteringFunction
def _GroupSymbolInfos(symbol_infos):
"""Groups the symbol infos by name and offset.
Args:
symbol_infos: an iterable of SymbolInfo
Returns:
The same output as _GroupSymbolInfosFromBinary.
"""
# Map the addresses to symbols.
offset_to_symbol_infos = collections.defaultdict(list)
name_to_symbol_infos = collections.defaultdict(list)
for symbol in symbol_infos:
symbol = symbol_extractor.SymbolInfo(name=RemoveSuffixes(symbol.name),
offset=symbol.offset,
size=symbol.size,
section=symbol.section)
offset_to_symbol_infos[symbol.offset].append(symbol)
name_to_symbol_infos[symbol.name].append(symbol)
return (dict(offset_to_symbol_infos), dict(name_to_symbol_infos))
def _GroupSymbolInfosFromBinary(binary_filename):
"""Group all the symbols from a binary by name and offset.
Args:
binary_filename: path to the binary.
Returns:
A tuple of dict:
(offset_to_symbol_infos, name_to_symbol_infos):
- offset_to_symbol_infos: {offset: [symbol_info1, ...]}
- name_to_symbol_infos: {name: [symbol_info1, ...]}
"""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
return _GroupSymbolInfos(symbol_infos)
def _StripPrefix(line):
"""Strips the linker section name prefix from a symbol line.
Args:
line: a line from an orderfile, usually in the form:
.text.SymbolName
Returns:
The symbol, SymbolName in the example above.
"""
for prefix in _PREFIXES:
if line.startswith(prefix):
return line[len(prefix):]
return line # Unprefixed case
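# Examples (illustrative): _StripPrefix('.text.unlikely.Bar') -> 'Bar', while a
# line that carries none of the known prefixes is returned unchanged.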
def _SectionNameToSymbols(section_name, section_to_symbols_map):
"""Yields all symbols which could be referred to by section_name.
If the section name is present in the map, the names in the map are returned.
Otherwise, any clone annotations and prefixes are stripped from the section
name and the remainder is returned.
"""
if (not section_name or
section_name == '.text' or
section_name.endswith('*')):
return # Don't return anything for catch-all sections
if section_name in section_to_symbols_map:
for symbol in section_to_symbols_map[section_name]:
yield symbol
else:
name = _StripPrefix(section_name)
if name:
yield name
def GetSectionsFromOrderfile(filename):
"""Yields the sections from an orderfile.
Args:
filename: The name of the orderfile.
Yields:
A list of symbol names.
"""
with open(filename, 'r') as f:
for line in f.xreadlines():
line = line.rstrip('\n')
if line:
yield line
@_UniqueGenerator
def GetSymbolsFromOrderfile(filename, section_to_symbols_map):
"""Yields the symbols from an orderfile. Output elements do not repeat.
Args:
filename: The name of the orderfile.
section_to_symbols_map: The mapping from section to symbol names. If a
section name is missing from the mapping, the
symbol name is assumed to be the section name with
prefixes and suffixes stripped.
Yields:
A list of symbol names.
"""
# TODO(lizeb,pasko): Move this method to symbol_extractor.py
for section in GetSectionsFromOrderfile(filename):
for symbol in _SectionNameToSymbols(RemoveSuffixes(section),
section_to_symbols_map):
yield symbol
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
offset_to_symbol_info):
"""Expands a symbol to include all symbols with the same offset.
Args:
profiled_symbol: the string symbol name to be expanded.
name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_info: {offset: [symbol_info1, ...], ...}
Returns:
A list of symbol names, or an empty list if profiled_symbol was not in
name_to_symbol_info.
"""
if profiled_symbol not in name_to_symbol_info:
return []
symbol_infos = name_to_symbol_info[profiled_symbol]
expanded = []
for symbol_info in symbol_infos:
expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
return expanded
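# Illustrative example (hypothetical data): if the binary places both 'Foo' and
# 'Bar' at offset 0x1000 (identical code folding), then
# _SymbolsWithSameOffset('Foo', name_map, offset_map) returns ['Foo', 'Bar']
# (in the order the symbols were grouped).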
@_UniqueGenerator
def _SectionMatchingRules(section_name, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map, suffixed_sections):
"""Gets the set of section matching rules for section_name.
These rules will include section_name, but also any sections which may
contain the same code due to cloning, splitting, or identical code folding.
Args:
section_name: The section to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol name. Missing
section names are treated as per _SectionNameToSymbols.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol. If a symbol isn't in the mapping, the
section names are generated from the set of _PREFIXES with the symbol
name.
suffixed_sections: A set of sections which can have suffixes.
Yields:
Section names including at least section_name.
"""
for name in _ExpandSection(section_name, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map):
yield name
# Since only a subset of methods (mostly those compiled with O2) ever get
# suffixes, don't emit the wildcards for ones where it won't be helpful.
# Otherwise linking takes too long.
if name in suffixed_sections:
# TODO(lizeb,pasko): instead of just appending .*, append .suffix.* for
# _SUFFIXES. We can't do this right now because that many wildcards
# seems to kill the linker (linking libchrome takes 3 hours). This gets
# almost all the benefit at a much lower link-time cost, but could cause
# problems with unexpected suffixes.
yield name + '.*'
def _ExpandSection(section_name, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map):
"""Yields the set of section names for section_name.
This set will include section_name, but also any sections which may contain
the same code due to identical code folding.
Args:
section_name: The section to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol name. Missing
section names are treated as per _SectionNameToSymbols.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol. If a symbol isn't in the mapping, the
section names are generated from the set of _PREFIXES with the symbol
name.
Yields:
Section names including at least section_name.
"""
yield section_name
for first_sym in _SectionNameToSymbols(section_name,
section_to_symbols_map):
for symbol in _SymbolsWithSameOffset(first_sym, name_to_symbol_infos,
offset_to_symbol_infos):
if symbol in symbol_to_sections_map:
for section in symbol_to_sections_map[symbol]:
yield section
for prefix in _PREFIXES:
yield prefix + symbol
@_UniqueGenerator
def _ExpandSections(section_names, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map, suffixed_sections):
"""Gets an ordered set of section matching rules for a list of sections.
Rules will not be repeated.
Args:
section_names: The sections to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
_GroupSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol names.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol.
suffixed_sections: A set of sections which can have suffixes.
Yields:
Section matching rules including at least section_names.
"""
for profiled_section in section_names:
for section in _SectionMatchingRules(
profiled_section, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed_sections):
yield section
def _CombineSectionListsByPrimaryName(symbol_to_sections_map):
"""Combines values of the symbol_to_sections_map by stripping suffixes.
Example:
{foo: [.text.foo, .text.bar.part.1],
foo.constprop.4: [.text.baz.constprop.3]} ->
{foo: [.text.foo, .text.bar, .text.baz]}
Args:
symbol_to_sections_map: Mapping from symbol name to list of section names
Returns:
The same mapping, but with symbol and section names suffix-stripped.
"""
simplified = {}
for suffixed_symbol, suffixed_sections in symbol_to_sections_map.iteritems():
symbol = RemoveSuffixes(suffixed_symbol)
sections = [RemoveSuffixes(section) for section in suffixed_sections]
simplified.setdefault(symbol, []).extend(sections)
return simplified
def _SectionsWithSuffixes(symbol_to_sections_map):
"""Finds sections which have suffixes applied.
Args:
symbol_to_sections_map: a map where the values are lists of section names.
Returns:
A set containing all section names which were seen with suffixes applied.
"""
sections_with_suffixes = set()
for suffixed_sections in symbol_to_sections_map.itervalues():
for suffixed_section in suffixed_sections:
section = RemoveSuffixes(suffixed_section)
if section != suffixed_section:
sections_with_suffixes.add(section)
return sections_with_suffixes
def _StripSuffixes(section_list):
"""Remove all suffixes on items in a list of sections or symbols."""
return [RemoveSuffixes(section) for section in section_list]
def main(argv):
parser = optparse.OptionParser(usage=
'usage: %prog [options] <unpatched_orderfile> <library>')
parser.add_option('--target-arch', action='store', dest='arch',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for the library.')
options, argv = parser.parse_args(argv)
if not options.arch:
options.arch = cygprofile_utils.DetectArchitecture()
if len(argv) != 3:
parser.print_help()
return 1
orderfile_filename = argv[1]
binary_filename = argv[2]
symbol_extractor.SetArchitecture(options.arch)
(offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
binary_filename)
obj_dir = cygprofile_utils.GetObjDir(binary_filename)
raw_symbol_map = cyglog_to_orderfile.GetSymbolToSectionsMapFromObjectFiles(
obj_dir)
suffixed = _SectionsWithSuffixes(raw_symbol_map)
symbol_to_sections_map = _CombineSectionListsByPrimaryName(raw_symbol_map)
section_to_symbols_map = cygprofile_utils.InvertMapping(
symbol_to_sections_map)
profiled_sections = _StripSuffixes(
GetSectionsFromOrderfile(orderfile_filename))
expanded_sections = _ExpandSections(
profiled_sections, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed)
for section in expanded_sections:
print section
# The following is needed otherwise Gold only applies a partial sort.
print '.text' # gets methods not in a section, such as assembly
for prefix in _PREFIXES:
print prefix + '*' # gets everything else
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main(sys.argv))
|
|
# ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import os
from affine import Affine
from app.paths import paths, PathsNotSetExecption
from recharge.dynamic_raster_finder import get_prisms, get_geo, get_individ_ndvi, get_penman, get_prism
from recharge.raster import Raster
from recharge.raster_tools import get_tiff_transform_func, get_tiff_transform
from recharge.tools import day_generator
from recharge.dict_setup import make_pairs, tiff_list
from recharge import STATIC_KEYS, INITIAL_KEYS
def generate_dataset(daterange, out):
print 'the out path {}'.format(out)
if not paths.is_set():
raise PathsNotSetExecption
geo, bounds = setup_geo()
args = out, geo, bounds
extract_initial(*args)
extract_static(*args)
extract_mask(*args)
make_blank_geo_folder(out)
for day in day_generator(*daterange):
extract_prism(day, *args)
extract_ndvi(day, *args)
extract_penman(day, *args)
print '----------------- day {} -------------------'.format(day.strftime('%m_%d_%Y'))
# ============= data extract ==================================================
def extract_prism(day, out, geo, bounds):
out_root = os.path.join(out, 'PRISM')
timestamp = day.strftime('%Y%m%d')
pp = os.path.join(out_root, 'precip', '800m_std_all')
matp = os.path.join(out_root, 'Temp', 'Maximum_standard')
mitp = os.path.join(out_root, 'Temp', 'Minimum_standard')
for base, key in ((pp, 'precip'), (mitp, 'min_temp'), (matp, 'max_temp')):
arr = get_prism(day, key)
name = '{}_{}.tif'.format(key, timestamp)
p = os.path.join(base, name)
slice_and_save(p, arr, geo, *bounds)
def extract_penman(day, out, geo, bounds):
keys = ('etrs', 'rg')
for k in keys:
arr = get_penman(day, k)
year = str(day.year)
yday = day.timetuple().tm_yday
if k == 'etrs':
p = os.path.join(out, 'PM_RAD', '{}{}'.format('PM', year))
name = '{}_{}_{:03n}.tif'.format('PM_NM', year, yday)
elif k == 'rg':
p = os.path.join(out, 'PM_RAD', '{}{}'.format('rad', year))
name = '{}_{}_{:03n}.tif'.format('RTOT', year, yday)
# if not os.path.isdir(p):
# os.makedirs(p)
p = os.path.join(p, name)
slice_and_save(p, arr, geo, *bounds)
def extract_ndvi(day, out, geo, bounds):
arr = get_individ_ndvi(day)
timestamp = day.strftime('%Y_%m_%d')
year = str(day.year)
p = os.path.join(out, 'NDVI', 'NDVI', year)
# if not os.path.isdir(p):
# os.makedirs(p)
p = os.path.join(p, '{}{}.tif'.format('NDVI', timestamp))
slice_and_save(p, arr, geo, *bounds)
# ============= initial/static extract ========================================
def save_initial(p, raster, transform, startc, endc, startr, endr):
geo = raster.geo
geo['rows'] = endr - startr
geo['cols'] = endc - startc
geo['geotransform'] = transform.to_gdal()
arr = raster.masked()
slice_and_save(p, arr, geo, startc, endc, startr, endr)
def extract_initial(*args):
pairs = make_pairs(paths.initial_inputs, INITIAL_KEYS)
root = paths.initial_inputs
_extract('initialize', pairs, root, *args)
def extract_static(*args):
pairs = make_pairs(paths.static_inputs, STATIC_KEYS)
root = paths.static_inputs
_extract('statics', pairs, root, *args)
def _extract(tag, pairs, root, out, geo, bounds):
for k, pair in pairs:
raster = Raster(pair, root=root)
p = make_reduced_path(out, tag, k)
arr = raster.masked()
slice_and_save(p, arr, geo, *bounds) # TODO - GELP
print '{} {} reduced'.format(tag, k)
def extract_mask(out, geo, bounds):
raster = Raster(paths.mask)
p = make_reduced_path(out, 'Mask', 'mask')
arr = raster.masked()
slice_and_save(p, arr, geo, *bounds)
print 'mask reduced'
def make_blank_geo_folder(out):
    print 'making blank geo for {}'.format(out)
p = os.path.join(out,'Blank_Geo')
if not os.path.isdir(p):
os.makedirs(p)
# ============= helpers =========================================
def setup_geo():
raster = Raster(paths.mask)
mask_arr = raster.as_bool_array
# get raster to provide geo data (need one that is not "Byte")
root = paths.initial_inputs
name = tiff_list(root)[0]
raster = Raster(name, root=root)
geo = raster.geo
startc, endc, startr, endr = bounding_box(mask_arr)
geo['rows'] = endr - startr
geo['cols'] = endc - startc
transform = get_tiff_transform(paths.mask)
transform *= Affine.translation(startc, startr)
geo['geotransform'] = transform.to_gdal()
return geo, (startc, endc, startr, endr)
def bounding_box(arr, padding=1):
startr, endr = None, None
for i, ri in enumerate(arr):
if ri.any():
if startr is None:
startr = i - padding
elif startr is not None:
endr = i + padding
break
startc, endc = None, None
for i, ri in enumerate(arr.T):
if ri.any():
if startc is None:
startc = i - padding
elif startc is not None:
endc = i + padding
break
return startc, endc, startr, endr
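# Illustrative example (hypothetical data): for a 6x6 boolean array whose True
# values lie only in rows/columns 2-3, bounding_box(arr) returns
# (startc, endc, startr, endr) = (1, 5, 1, 5), i.e. one row/column of padding
# around the populated block.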
def slice_and_save(p, arr, geo, startc, endc, startr, endr):
if not os.path.isdir(os.path.dirname(p)):
os.makedirs(os.path.dirname(p))
raster = Raster.fromarray(arr)
marr = raster.unmasked(arr.shape) # todo - GELP: unmasked() modification causing error in reduce_dataset()
marr = marr[slice(startr, endr), slice(startc, endc)]
# print 'saving {}'.format(p)
raster.save(p, marr, geo)
def make_reduced_path(out, tag, k):
p = os.path.join(out, tag)
# if not os.path.isdir(p):
# os.makedirs(p)
p = os.path.join(p, '{}_reduced.tif'.format(k))
return p
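# Example (illustrative, hypothetical key):
#   make_reduced_path('/out', 'statics', 'taw') -> '/out/statics/taw_reduced.tif'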
def get_transform(startc, startr):
transform = get_tiff_transform(paths.mask)
transform *= Affine.translation(startc, startr)
transform = transform.to_gdal()
return transform
if __name__ == '__main__':
paths.build('/Volumes/Seagate Expansion Drive/ETRM_inputs')
generate_dataset(['1/1/2000', '12/31/2013'], '/Volumes/Seagate Expansion Drive/gabe_aoi_inputs')
# ============= EOF =============================================
|
|
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sure
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import drop_table, sync_table
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery
from tests.integration.cqlengine.base import BaseCassEngTestCase
class TestMultiKeyModel(Model):
partition = columns.Integer(primary_key=True)
cluster = columns.Integer(primary_key=True)
count = columns.Integer(required=False)
text = columns.Text(required=False)
class BatchQueryTests(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(BatchQueryTests, cls).setUpClass()
drop_table(TestMultiKeyModel)
sync_table(TestMultiKeyModel)
@classmethod
def tearDownClass(cls):
super(BatchQueryTests, cls).tearDownClass()
drop_table(TestMultiKeyModel)
def setUp(self):
super(BatchQueryTests, self).setUp()
self.pkey = 1
for obj in TestMultiKeyModel.filter(partition=self.pkey):
obj.delete()
def test_insert_success_case(self):
b = BatchQuery()
inst = TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=2, count=3, text='4')
with self.assertRaises(TestMultiKeyModel.DoesNotExist):
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
b.execute()
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
def test_update_success_case(self):
inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4')
b = BatchQuery()
inst.count = 4
inst.batch(b).save()
inst2 = TestMultiKeyModel.get(partition=self.pkey, cluster=2)
assert inst2.count == 3
b.execute()
inst3 = TestMultiKeyModel.get(partition=self.pkey, cluster=2)
assert inst3.count == 4
def test_delete_success_case(self):
inst = TestMultiKeyModel.create(partition=self.pkey, cluster=2, count=3, text='4')
b = BatchQuery()
inst.batch(b).delete()
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
b.execute()
with self.assertRaises(TestMultiKeyModel.DoesNotExist):
TestMultiKeyModel.get(partition=self.pkey, cluster=2)
def test_context_manager(self):
with BatchQuery() as b:
for i in range(5):
TestMultiKeyModel.batch(b).create(partition=self.pkey, cluster=i, count=3, text='4')
for i in range(5):
with self.assertRaises(TestMultiKeyModel.DoesNotExist):
TestMultiKeyModel.get(partition=self.pkey, cluster=i)
for i in range(5):
TestMultiKeyModel.get(partition=self.pkey, cluster=i)
def test_bulk_delete_success_case(self):
for i in range(1):
for j in range(5):
TestMultiKeyModel.create(partition=i, cluster=j, count=i*j, text='{0}:{1}'.format(i,j))
with BatchQuery() as b:
TestMultiKeyModel.objects.batch(b).filter(partition=0).delete()
assert TestMultiKeyModel.filter(partition=0).count() == 5
assert TestMultiKeyModel.filter(partition=0).count() == 0
#cleanup
for m in TestMultiKeyModel.all():
m.delete()
def test_empty_batch(self):
b = BatchQuery()
b.execute()
with BatchQuery() as b:
pass
class BatchQueryCallbacksTests(BaseCassEngTestCase):
def test_API_managing_callbacks(self):
# Callbacks can be added at init and after
def my_callback(*args, **kwargs):
pass
# adding on init:
batch = BatchQuery()
batch.add_callback(my_callback)
batch.add_callback(my_callback, 2, named_arg='value')
batch.add_callback(my_callback, 1, 3)
assert batch._callbacks == [
(my_callback, (), {}),
(my_callback, (2,), {'named_arg':'value'}),
(my_callback, (1, 3), {})
]
def test_callbacks_properly_execute_callables_and_tuples(self):
call_history = []
def my_callback(*args, **kwargs):
call_history.append(args)
# adding on init:
batch = BatchQuery()
batch.add_callback(my_callback)
batch.add_callback(my_callback, 'more', 'args')
batch.execute()
assert len(call_history) == 2
assert [(), ('more', 'args')] == call_history
def test_callbacks_tied_to_execute(self):
"""Batch callbacks should NOT fire if batch is not executed in context manager mode"""
call_history = []
def my_callback(*args, **kwargs):
call_history.append(args)
with BatchQuery() as batch:
batch.add_callback(my_callback)
pass
assert len(call_history) == 1
class SomeError(Exception):
pass
with self.assertRaises(SomeError):
with BatchQuery() as batch:
batch.add_callback(my_callback)
# this error bubbling up through context manager
# should prevent callback runs (along with b.execute())
raise SomeError
# still same call history. Nothing added
assert len(call_history) == 1
# but if execute ran, even with an error bubbling through
# the callbacks also would have fired
with self.assertRaises(SomeError):
with BatchQuery(execute_on_exception=True) as batch:
batch.add_callback(my_callback)
        # with execute_on_exception=True the batch still executes despite the
        # error bubbling up, so the callbacks fire as well
raise SomeError
# still same call history
assert len(call_history) == 2
|
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=g-multiple-import
"""Joints connect bodies and constrain their movement."""
import abc
from typing import Any, List, Tuple
from brax import jumpy as jp
from brax import math
from brax import pytree
from brax.physics import bodies
from brax.physics import config_pb2
from brax.physics.base import P, QP, vec_to_arr
class Joint(abc.ABC):
"""A joint connects two bodies and constrains their movement.
  This constraint is determined by axes that define how the two bodies may move
  in relation to one another.
"""
__pytree_ignore__ = ('index', 'dof')
def __init__(self,
joints: List[config_pb2.Joint],
body: bodies.Body,
spring_damping_coeff: float = 2.0):
"""Creates a Joint that connects two bodies and constrains their movement.
Args:
joints: list of joints (all of the same type) to batch together
body: batched body that contain the parents and children of each joint
spring_damping_coeff: coefficient for setting default spring damping
"""
self.stiffness = jp.array([j.stiffness for j in joints])
self.angular_damping = jp.array([j.angular_damping for j in joints])
self.spring_damping = jp.array([
j.spring_damping if j.HasField('spring_damping') else
spring_damping_coeff * jp.sqrt(j.stiffness) for j in joints
])
self.limit_strength = jp.array([
j.limit_strength if j.HasField('limit_strength') else j.stiffness
for j in joints
])
self.limit = jp.array([[[i.min, i.max]
for i in j.angle_limit]
for j in joints]) / 180.0 * jp.pi
self.body_p = jp.take(body, [body.index[j.parent] for j in joints])
self.body_c = jp.take(body, [body.index[j.child] for j in joints])
self.off_p = jp.array([vec_to_arr(j.parent_offset) for j in joints])
self.off_c = jp.array([vec_to_arr(j.child_offset) for j in joints])
self.index = {j.name: i for i, j in enumerate(joints)}
self.dof = len(joints[0].angle_limit)
v_rot = jp.vmap(math.rotate, include=[True, False])
relative_quats = jp.array(
[math.euler_to_quat(vec_to_arr(j.reference_rotation)) for j in joints])
self.axis_c = jp.array([
v_rot(jp.eye(3), math.euler_to_quat(vec_to_arr(j.rotation)))
for j in joints
])
self.axis_p = jp.array(
[v_rot(j, r) for j, r in zip(self.axis_c, relative_quats)])
def apply(self, qp: QP) -> P:
"""Returns impulses to constrain and align bodies connected by a joint.
Args:
qp: State data for system
Returns:
dP: Impulses on all bodies to maintain joint constraints
"""
qp_p = jp.take(qp, self.body_p.idx)
qp_c = jp.take(qp, self.body_c.idx)
dp_p, dp_c = jp.vmap(type(self).apply_reduced)(self, qp_p, qp_c)
# sum together all impulse contributions across parents and children
body_idx = jp.concatenate((self.body_p.idx, self.body_c.idx))
dp_vel = jp.concatenate((dp_p.vel, dp_c.vel))
dp_ang = jp.concatenate((dp_p.ang, dp_c.ang))
dp_vel = jp.segment_sum(dp_vel, body_idx, qp.pos.shape[0])
dp_ang = jp.segment_sum(dp_ang, body_idx, qp.pos.shape[0])
return P(vel=dp_vel, ang=dp_ang)
def angle_vel(self, qp: QP) -> Tuple[Any, Any]:
"""Returns joint angle and velocity.
Args:
qp: State data for system
Returns:
angle: n-tuple of joint angles where n = # DoF of the joint
vel: n-tuple of joint velocities where n = # DoF of the joint
"""
@jp.vmap
def op(joint, qp_p, qp_c):
axes, angles = joint.axis_angle(qp_p, qp_c)
vels = tuple([jp.dot(qp_p.ang - qp_c.ang, axis) for axis in axes])
return angles, vels
qp_p = jp.take(qp, self.body_p.idx)
qp_c = jp.take(qp, self.body_c.idx)
angles, vels = op(self, qp_p, qp_c)
return angles, vels
@abc.abstractmethod
def apply_reduced(self, qp_p: QP, qp_c: QP) -> Tuple[P, P]:
"""Returns impulses to constrain and align bodies connected by a joint.
Operates in reduced joint space.
Args:
qp_p: Joint parent state data
qp_c: Joint child state data
Returns:
dp_p: Joint parent impulse
dp_c: Joint child impulse
"""
@abc.abstractmethod
def axis_angle(self, qp_p: QP, qp_c: QP) -> Tuple[Any, Any]:
"""Returns axes and angles of a single joint.
vmap across axis_angle to get all joints.
Args:
qp_p: State for parent body
qp_c: State for child body
Returns:
axis: n-tuple of joint axes where n = # DoF of the joint
angle: n-tuple of joint angles where n = # DoF of the joint
"""
@pytree.register
class Revolute(Joint):
"""A revolute joint constrains two bodies around a single axis.
Constructs a revolute joint where the parent's local x-axis is constrained
to point in the same direction as the child's local x-axis. This construction
follows the line of nodes convention shared by the universal and spherical
joints for x-y'-z'' intrinsic euler angles.
"""
def __init__(self, joints: List[config_pb2.Joint], body: bodies.Body):
super().__init__(joints, body, .5)
def apply_reduced(self, qp_p: QP, qp_c: QP) -> Tuple[P, P]:
"""Returns calculated impulses in compressed joint space."""
pos_p, vel_p = qp_p.to_world(self.off_p)
pos_c, vel_c = qp_c.to_world(self.off_c)
# push the bodies towards their offsets
# TODO: cap this damping so that it can't overcorrect
impulse = (pos_p - pos_c) * self.stiffness + self.spring_damping * (
vel_p - vel_c)
dp_p = self.body_p.impulse(qp_p, -impulse, pos_p)
dp_c = self.body_c.impulse(qp_c, impulse, pos_c)
# torque the bodies to align their axes
(axis,), (angle,) = self.axis_angle(qp_p, qp_c)
axis_c = math.rotate(self.axis_c[0], qp_c.rot)
torque = self.stiffness * jp.cross(axis, axis_c)
# torque the bodies to stay within angle limits
dang = jp.where(angle < self.limit[0][0], self.limit[0][0] - angle, 0)
dang = jp.where(angle > self.limit[0][1], self.limit[0][1] - angle, dang)
torque -= self.limit_strength * axis * dang
# damp the angular motion
torque -= self.angular_damping * (qp_p.ang - qp_c.ang)
dang_p = jp.matmul(self.body_p.inertia, torque)
dang_c = jp.matmul(self.body_c.inertia, -torque)
dp_p = dp_p.replace(ang=dp_p.ang + dang_p)
dp_c = dp_c.replace(ang=dp_c.ang + dang_c)
return dp_p, dp_c
def axis_angle(self, qp_p: QP, qp_c: QP) -> Tuple[Any, Any]:
"""Returns axes and angles of a single joint."""
axis_p = math.rotate(self.axis_p[0], qp_p.rot)
ref_p = math.rotate(self.axis_p[2], qp_p.rot)
ref_c = math.rotate(self.axis_c[2], qp_c.rot)
# algebraically the same as the calculation in `Spherical`, but simpler
# because child local-x and parent local-x are constrained to be the same
psi = math.signed_angle(axis_p, ref_p, ref_c)
return (axis_p,), (psi,)
@pytree.register
class Universal(Joint):
"""A revolute joint constrains two bodies around two axes.
Constructs a universal joint defined as the first two degrees of freedom
of a spherical joint. See `Spherical` for details.
"""
def apply_reduced(self, qp_p: QP, qp_c: QP) -> Tuple[P, P]:
"""Returns calculated impulses in compressed joint space."""
pos_p, vel_p = qp_p.to_world(self.off_p)
pos_c, vel_c = qp_c.to_world(self.off_c)
# push the bodies towards their offsets
# TODO: cap this damping so that it can't overcorrect
impulse = (pos_p - pos_c) * self.stiffness + self.spring_damping * (
vel_p - vel_c)
dp_p = self.body_p.impulse(qp_p, -impulse, pos_p)
dp_c = self.body_c.impulse(qp_c, impulse, pos_c)
# torque the bodies to align to a joint plane
(axis_1, axis_2), angles = self.axis_angle(qp_p, qp_c)
axis_c_proj = axis_2 - jp.dot(axis_2, axis_1) * axis_1
axis_c_proj = axis_c_proj / jp.safe_norm(axis_c_proj)
torque = (self.limit_strength / 5.) * jp.cross(axis_c_proj, axis_2)
# torque the bodies to stay within angle limits
axis, angle = jp.array((axis_1, axis_2)), jp.array(angles)
dang = jp.where(angle < self.limit[:, 0], self.limit[:, 0] - angle, 0)
dang = jp.where(angle > self.limit[:, 1], self.limit[:, 1] - angle, dang)
torque -= self.limit_strength * jp.sum(jp.vmap(jp.multiply)(axis, dang), 0)
# damp the angular motion
torque -= self.angular_damping * (qp_p.ang - qp_c.ang)
dang_p = jp.matmul(self.body_p.inertia, torque)
dang_c = jp.matmul(self.body_c.inertia, -torque)
dp_p = dp_p.replace(ang=dp_p.ang + dang_p)
dp_c = dp_c.replace(ang=dp_c.ang + dang_c)
return dp_p, dp_c
def axis_angle(self, qp_p: QP, qp_c: QP) -> Tuple[Any, Any]:
"""Returns axes and angles of a single joint."""
v_rot = jp.vmap(math.rotate, include=[True, False])
axis_p_rotated = v_rot(self.axis_p, qp_p.rot)
axis_c_rotated = v_rot(self.axis_c, qp_c.rot)
axis_1_p = axis_p_rotated[0]
axis_2_p = axis_p_rotated[1]
axis_1_c = axis_c_rotated[0]
axis_2_c = axis_c_rotated[1]
axis_3_c = axis_c_rotated[2]
line_of_nodes = jp.cross(axis_3_c, axis_1_p)
line_of_nodes = line_of_nodes / (1e-10 + jp.safe_norm(line_of_nodes))
y_n_normal = axis_1_p
psi = math.signed_angle(y_n_normal, axis_2_p, line_of_nodes)
axis_1_p_in_xz_c = jp.dot(axis_1_p, axis_1_c) * axis_1_c + jp.dot(
axis_1_p, axis_2_c) * axis_2_c
axis_1_p_in_xz_c = axis_1_p_in_xz_c / (1e-10 +
jp.safe_norm(axis_1_p_in_xz_c))
theta = jp.safe_arccos(jp.clip(jp.dot(axis_1_p_in_xz_c, axis_1_p), -1,
1)) * jp.sign(jp.dot(axis_1_p, axis_3_c))
axis = (axis_1_p, axis_2_c)
angle = (psi, theta)
return axis, angle
@pytree.register
class Spherical(Joint):
"""A spherical joint constrains two bodies around three axes.
Constructs a spherical joint which returns intrinsic euler angles in the
x-y'-z'' convention between the parent and child. Uses the line of nodes
construction described in section 3.2.3.2 here:
https://www.sedris.org/wg8home/Documents/WG80485.pdf
"""
def apply_reduced(self, qp_p: QP, qp_c: QP) -> Tuple[P, P]:
"""Returns calculated impulses in compressed joint space."""
pos_p, vel_p = qp_p.to_world(self.off_p)
pos_c, vel_c = qp_c.to_world(self.off_c)
# push the bodies towards their offsets
# TODO: cap this damping so that it can't overcorrect
impulse = (pos_p - pos_c) * self.stiffness + self.spring_damping * (
vel_p - vel_c)
dp_p = self.body_p.impulse(qp_p, -impulse, pos_p)
dp_c = self.body_c.impulse(qp_c, impulse, pos_c)
# torque the bodies to stay within angle limits
axes, angles = self.axis_angle(qp_p, qp_c)
axis, angle = jp.array(axes), jp.array(angles)
dang = jp.where(angle < self.limit[:, 0], self.limit[:, 0] - angle, 0)
dang = jp.where(angle > self.limit[:, 1], self.limit[:, 1] - angle, dang)
torque = -self.limit_strength * jp.sum(jp.vmap(jp.multiply)(axis, dang), 0)
# damp the angular motion
torque -= self.angular_damping * (qp_p.ang - qp_c.ang)
dp_p = dp_p.replace(ang=dp_p.ang + jp.matmul(self.body_p.inertia, torque))
dp_c = dp_c.replace(ang=dp_c.ang + jp.matmul(self.body_c.inertia, -torque))
return dp_p, dp_c
def axis_angle(self, qp_p: QP, qp_c: QP) -> Tuple[Any, Any]:
"""Returns axes and angles of a single joint."""
v_rot = jp.vmap(math.rotate, include=[True, False])
axis_p_rotated = v_rot(self.axis_p, qp_p.rot)
axis_c_rotated = v_rot(self.axis_c, qp_c.rot)
axis_1_p = axis_p_rotated[0]
axis_2_p = axis_p_rotated[1]
axis_1_c = axis_c_rotated[0]
axis_2_c = axis_c_rotated[1]
axis_3_c = axis_c_rotated[2]
line_of_nodes = jp.cross(axis_3_c, axis_1_p)
line_of_nodes = line_of_nodes / (1e-10 + jp.safe_norm(line_of_nodes))
y_n_normal = axis_1_p
psi = math.signed_angle(y_n_normal, axis_2_p, line_of_nodes)
axis_1_p_in_xz_c = jp.dot(axis_1_p, axis_1_c) * axis_1_c + jp.dot(
axis_1_p, axis_2_c) * axis_2_c
axis_1_p_in_xz_c = axis_1_p_in_xz_c / (1e-10 +
jp.safe_norm(axis_1_p_in_xz_c))
ang_between_1_p_xz_c = jp.dot(axis_1_p_in_xz_c, axis_1_p)
theta = jp.safe_arccos(jp.clip(ang_between_1_p_xz_c, -1, 1)) * jp.sign(
jp.dot(axis_1_p, axis_3_c))
yc_n_normal = -axis_3_c
phi = math.signed_angle(yc_n_normal, axis_2_c, line_of_nodes)
axis = (axis_1_p, axis_2_c, axis_3_c)
angle = (psi, theta, phi)
return axis, angle
def get(config: config_pb2.Config, body: bodies.Body) -> List[Joint]:
"""Creates all joints given a config."""
joints = {}
for joint in config.joints:
dof = len(joint.angle_limit)
if dof not in joints:
joints[dof] = []
joints[dof].append(joint)
# ensure stable order for joint application: dof
joints = sorted(joints.items(), key=lambda kv: kv[0])
ret = []
for k, v in joints:
if k == 1:
ret.append(Revolute(v, body))
elif k == 2:
ret.append(Universal(v, body))
elif k == 3:
ret.append(Spherical(v, body))
else:
raise RuntimeError(f'invalid number of joint limits: {k}')
return ret
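# Illustrative example (not from the source): a config containing two 1-dof
# joints and one 3-dof joint yields [Revolute(<the two 1-dof joints>, body),
# Spherical(<the 3-dof joint>, body)], since joints are grouped and ordered by
# their number of angle limits.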
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.ssd_meta_arch."""
import functools
import numpy as np
import tensorflow as tf
from object_detection.core import anchor_generator
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import post_processing
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import test_case
from object_detection.utils import test_utils
slim = tf.contrib.slim
class FakeSSDFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
def __init__(self):
super(FakeSSDFeatureExtractor, self).__init__(
is_training=True,
depth_multiplier=0,
min_depth=0,
pad_to_multiple=1,
batch_norm_trainable=True,
conv_hyperparams=None)
def preprocess(self, resized_inputs):
return tf.identity(resized_inputs)
def extract_features(self, preprocessed_inputs):
with tf.variable_scope('mock_model'):
features = slim.conv2d(inputs=preprocessed_inputs, num_outputs=32,
kernel_size=1, scope='layer1')
return [features]
class MockAnchorGenerator2x2(anchor_generator.AnchorGenerator):
"""Sets up a simple 2x2 anchor grid on the unit square."""
def name_scope(self):
return 'MockAnchorGenerator'
def num_anchors_per_location(self):
return [1]
def _generate(self, feature_map_shape_list, im_height, im_width):
return [box_list.BoxList(
tf.constant([[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[1., 1., 1.5, 1.5] # Anchor that is outside clip_window.
], tf.float32))]
def num_anchors(self):
return 4
def _get_value_for_matching_key(dictionary, suffix):
for key in dictionary.keys():
if key.endswith(suffix):
return dictionary[key]
raise ValueError('key not found {}'.format(suffix))
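# Example (illustrative): _get_value_for_matching_key(
#     {'model/Loss/localization_loss': 0.1}, 'Loss/localization_loss') -> 0.1,
# matching on the key suffix so scope prefixes in the loss dict do not matter.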
class SsdMetaArchTest(test_case.TestCase):
def _create_model(self, apply_hard_mining=True,
normalize_loc_loss_by_codesize=False):
is_training = False
num_classes = 1
mock_anchor_generator = MockAnchorGenerator2x2()
mock_box_predictor = test_utils.MockBoxPredictor(
is_training, num_classes)
mock_box_coder = test_utils.MockBoxCoder()
fake_feature_extractor = FakeSSDFeatureExtractor()
mock_matcher = test_utils.MockMatcher()
region_similarity_calculator = sim_calc.IouSimilarity()
encode_background_as_zeros = False
def image_resizer_fn(image):
return [tf.identity(image), tf.shape(image)]
classification_loss = losses.WeightedSigmoidClassificationLoss()
localization_loss = losses.WeightedSmoothL1LocalizationLoss()
non_max_suppression_fn = functools.partial(
post_processing.batch_multiclass_non_max_suppression,
score_thresh=-20.0,
iou_thresh=1.0,
max_size_per_class=5,
max_total_size=5)
classification_loss_weight = 1.0
localization_loss_weight = 1.0
negative_class_weight = 1.0
normalize_loss_by_num_matches = False
hard_example_miner = None
if apply_hard_mining:
# This hard example miner is expected to be a no-op.
hard_example_miner = losses.HardExampleMiner(
num_hard_examples=None,
iou_threshold=1.0)
code_size = 4
model = ssd_meta_arch.SSDMetaArch(
is_training, mock_anchor_generator, mock_box_predictor, mock_box_coder,
fake_feature_extractor, mock_matcher, region_similarity_calculator,
encode_background_as_zeros, negative_class_weight, image_resizer_fn,
non_max_suppression_fn, tf.identity, classification_loss,
localization_loss, classification_loss_weight, localization_loss_weight,
normalize_loss_by_num_matches, hard_example_miner, add_summaries=False,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize)
return model, num_classes, mock_anchor_generator.num_anchors(), code_size
def test_preprocess_preserves_shapes_with_dynamic_input_image(self):
image_shapes = [(3, None, None, 3),
(None, 10, 10, 3),
(None, None, None, 3)]
model, _, _, _ = self._create_model()
for image_shape in image_shapes:
image_placeholder = tf.placeholder(tf.float32, shape=image_shape)
preprocessed_inputs, _ = model.preprocess(image_placeholder)
self.assertAllEqual(preprocessed_inputs.shape.as_list(), image_shape)
def test_preprocess_preserves_shape_with_static_input_image(self):
def graph_fn(input_image):
model, _, _, _ = self._create_model()
return model.preprocess(input_image)
input_image = np.random.rand(2, 3, 3, 3).astype(np.float32)
preprocessed_inputs, _ = self.execute(graph_fn, [input_image])
self.assertAllEqual(preprocessed_inputs.shape, [2, 3, 3, 3])
def test_predict_result_shapes_on_image_with_dynamic_shape(self):
batch_size = 3
image_size = 2
input_shapes = [(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, num_classes, num_anchors, code_size = self._create_model()
preprocessed_input_placeholder = tf.placeholder(tf.float32,
shape=input_shape)
prediction_dict = model.predict(
preprocessed_input_placeholder, true_image_shapes=None)
self.assertTrue('box_encodings' in prediction_dict)
self.assertTrue('class_predictions_with_background' in prediction_dict)
self.assertTrue('feature_maps' in prediction_dict)
self.assertTrue('anchors' in prediction_dict)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
prediction_out = sess.run(prediction_dict,
feed_dict={
preprocessed_input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
expected_box_encodings_shape_out = (batch_size, num_anchors, code_size)
expected_class_predictions_with_background_shape_out = (batch_size,
num_anchors,
num_classes + 1)
self.assertAllEqual(prediction_out['box_encodings'].shape,
expected_box_encodings_shape_out)
self.assertAllEqual(
prediction_out['class_predictions_with_background'].shape,
expected_class_predictions_with_background_shape_out)
def test_predict_result_shapes_on_image_with_static_shape(self):
with tf.Graph().as_default():
_, num_classes, num_anchors, code_size = self._create_model()
def graph_fn(input_image):
model, _, _, _ = self._create_model()
predictions = model.predict(input_image, true_image_shapes=None)
return (predictions['box_encodings'],
predictions['class_predictions_with_background'],
predictions['feature_maps'],
predictions['anchors'])
batch_size = 3
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_box_encodings_shape = (batch_size, num_anchors, code_size)
expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1)
(box_encodings, class_predictions, _, _) = self.execute(graph_fn,
[input_image])
self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape)
self.assertAllEqual(class_predictions.shape,
expected_class_predictions_shape)
def test_postprocess_results_are_correct(self):
batch_size = 2
image_size = 2
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_boxes = np.array([[[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]], # padding
[[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]] # padding
])
expected_scores = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_classes = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_num_detections = np.array([3, 3])
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, _, _, _ = self._create_model()
input_placeholder = tf.placeholder(tf.float32, shape=input_shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
input_placeholder)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
self.assertTrue('detection_boxes' in detections)
self.assertTrue('detection_scores' in detections)
self.assertTrue('detection_classes' in detections)
self.assertTrue('num_detections' in detections)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
detections_out = sess.run(detections,
feed_dict={
input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
self.assertAllClose(detections_out['detection_boxes'], expected_boxes)
self.assertAllClose(detections_out['detection_scores'], expected_scores)
self.assertAllClose(detections_out['detection_classes'], expected_classes)
self.assertAllClose(detections_out['num_detections'],
expected_num_detections)
def test_loss_results_are_correct(self):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model()
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (
_get_value_for_matching_key(loss_dict, 'Loss/localization_loss'),
_get_value_for_matching_key(loss_dict, 'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
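      # Editorial note: the expected value assumes the test model yields all-zero
      # class logits, so a per-class (sigmoid) cross-entropy contributes log(2)
      # per class; summed over (num_classes + 1) classes, num_anchors anchors and
      # batch_size images this gives the expression above.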
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_normalize_by_codesize_true(self):
with tf.Graph().as_default():
_, _, _, _ = self._create_model()
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False,
normalize_loc_loss_by_codesize=True)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (_get_value_for_matching_key(loss_dict, 'Loss/localization_loss'),)
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.5 / 4
localization_loss = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
def test_loss_results_are_correct_with_hard_example_mining(self):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model()
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model()
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (
_get_value_for_matching_key(loss_dict, 'Loss/localization_loss'),
_get_value_for_matching_key(loss_dict, 'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_restore_map_for_detection_ckpt(self):
model, _, _, _ = self._create_model()
model.predict(tf.constant(np.array([[[0, 0], [1, 1]], [[1, 0], [0, 1]]],
dtype=np.float32)),
true_image_shapes=None)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session() as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_restore_map_for_classification_ckpt(self):
# Define mock tensorflow classification graph and save variables.
test_graph_classification = tf.Graph()
with test_graph_classification.as_default():
image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])
with tf.variable_scope('mock_model'):
net = slim.conv2d(image, num_outputs=32, kernel_size=1, scope='layer1')
slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Create tensorflow detection graph and load variables from
# classification checkpoint.
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(fine_tune_checkpoint_type='classification')
self.assertNotIn('another_variable', var_map)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session(graph=test_graph_detection) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_load_all_det_checkpoint_vars(self):
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model()
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
if __name__ == '__main__':
tf.test.main()
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 04 13:19:01 2013
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lmap
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from statsmodels import iolib
from statsmodels.tools.tools import add_constant
from statsmodels.regression.linear_model import OLS
import statsmodels.sandbox.regression.gmm as gmm
def get_griliches76_data():
import os
curdir = os.path.split(__file__)[0]
path = os.path.join(curdir, 'griliches76.dta')
griliches76_data = iolib.genfromdta(path, missing_flt=np.NaN, pandas=True)
# create year dummies
years = griliches76_data['year'].unique()
N = griliches76_data.shape[0]
    for yr in years:
        # vectorized dummy creation; avoids the deprecated DataFrame.ix indexer
        griliches76_data['D_%i' % yr] = (griliches76_data['year'] == yr).astype(float)
griliches76_data['const'] = 1
X = add_constant(griliches76_data[['s', 'iq', 'expr', 'tenure', 'rns',
'smsa', 'D_67', 'D_68', 'D_69', 'D_70',
'D_71', 'D_73']],
prepend=True) # for R comparison
#prepend=False) # for Stata comparison
Z = add_constant(griliches76_data[['expr', 'tenure', 'rns', 'smsa', \
'D_67', 'D_68', 'D_69', 'D_70', 'D_71',
'D_73', 'med', 'kww', 'age', 'mrt']])
Y = griliches76_data['lw']
return Y, X, Z
# use module global to load only once
yg_df, xg_df, zg_df = get_griliches76_data()
endog = np.asarray(yg_df, dtype=float) # TODO: why is yg_df float32
exog, instrument = lmap(np.asarray, [xg_df, zg_df])
assert exog.dtype == np.float64
assert instrument.dtype == np.float64
# from R
#-----------------
varnames = np.array(["(Intercept)", "s", "iq", "expr", "tenure", "rns", "smsa", "D_67", "D_68", "D_69", "D_70",
"D_71", "D_73"])
params = np.array([ 4.03350989, 0.17242531, -0.00909883, 0.04928949, 0.04221709,
-0.10179345, 0.12611095, -0.05961711, 0.04867956, 0.15281763,
0.17443605, 0.09166597, 0.09323977])
bse = np.array([ 0.31816162, 0.02091823, 0.00474527, 0.00822543, 0.00891969,
0.03447337, 0.03119615, 0.05577582, 0.05246796, 0.05201092,
0.06027671, 0.05461436, 0.05767865])
tvalues = np.array([ 12.6775501, 8.2428242, -1.9174531, 5.9923305, 4.7330205,
-2.9528144, 4.0425165, -1.0688701, 0.9277959, 2.9381834,
2.8939212, 1.6784225, 1.6165385])
pvalues = np.array([ 1.72360000e-33, 7.57025400e-16, 5.55625000e-02,
3.21996700e-09, 2.64739100e-06, 3.24794100e-03,
5.83809900e-05, 2.85474400e-01, 3.53813900e-01,
3.40336100e-03, 3.91575100e-03, 9.36840200e-02,
1.06401300e-01])
#-----------------
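# Editorial cross-check sketch (not part of the original test suite): the 2SLS
# estimator has the closed form b = (X' Pz X)^{-1} X' Pz y with the projection
# Pz = Z (Z'Z)^{-1} Z'.  It should agree with gmm.IV2SLS(endog, exog,
# instrument).fit().params, and hence with `params` above, up to numerical
# precision, e.g.
#     assert_allclose(_iv2sls_closed_form(endog, exog, instrument), params, rtol=1e-7)
def _iv2sls_closed_form(y, x, z):
    ztz_inv = np.linalg.inv(np.dot(z.T, z))
    pz_x = np.dot(z, np.dot(ztz_inv, np.dot(z.T, x)))  # Pz X
    return np.linalg.solve(np.dot(x.T, pz_x), np.dot(pz_x.T, y))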
def test_iv2sls_r():
mod = gmm.IV2SLS(endog, exog, instrument)
res = mod.fit()
# print(res.params)
# print(res.params - params)
n, k = exog.shape
assert_allclose(res.params, params, rtol=1e-7, atol=1e-9)
# TODO: check df correction
#assert_allclose(res.bse * np.sqrt((n - k) / (n - k - 1.)), bse,
assert_allclose(res.bse, bse, rtol=0, atol=3e-7)
def test_ivgmm0_r():
n, k = exog.shape
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(np.ones(exog.shape[1], float), maxiter=0, inv_weights=w0inv,
optim_method='bfgs',
optim_args={'gtol':1e-8, 'disp': 0})
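    # Editorial note: maxiter=0 runs a single GMM step with the supplied
    # first-step weight matrix W0 = inv(Z'Z / n); with this weighting the point
    # estimates essentially reproduce 2SLS (hence the reuse of the R 2SLS
    # numbers above), while the standard errors differ, see the TODO below.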
assert_allclose(res.params, params, rtol=1e-4, atol=1e-4)
    # TODO: res.bse and bse are not the same; rtol=0.09 is large in this case.
    # Is res.bse still robust? Is bse not a sandwich estimator?
assert_allclose(res.bse, bse, rtol=0.09, atol=0)
score = res.model.score(res.params, w0)
assert_allclose(score, np.zeros(score.shape), rtol=0, atol=5e-6) # atol=1e-8) ??
def test_ivgmm1_stata():
# copied constant to the beginning
params_stata = np.array(
[ 4.0335099 , 0.17242531, -0.00909883, 0.04928949, 0.04221709,
-0.10179345, 0.12611095, -0.05961711, 0.04867956, 0.15281763,
0.17443605, 0.09166597, 0.09323976])
# robust bse with gmm onestep
bse_stata = np.array(
[ 0.33503289, 0.02073947, 0.00488624, 0.0080498 , 0.00946363,
0.03371053, 0.03081138, 0.05171372, 0.04981322, 0.0479285 ,
0.06112515, 0.0554618 , 0.06084901])
n, k = exog.shape
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
w0 = np.linalg.inv(w0inv)
start = OLS(endog, exog).fit().params
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv, optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
# move constant to end for Stata
idx = lrange(len(params))
idx = idx[1:] + idx[:1]
exog_st = exog[:, idx]
class TestGMMOLS(object):
@classmethod
def setup_class(self):
exog = exog_st # with const at end
res_ols = OLS(endog, exog).fit()
# use exog as instrument
nobs, k_instr = exog.shape
w0inv = np.dot(exog.T, exog) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, exog)
res = mod.fit(np.ones(exog.shape[1], float), maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
self.res2 = res_ols
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=5e-4, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=1e-5)
n = res1.model.exog.shape[0]
dffac = 1#np.sqrt((n - 1.) / n) # currently different df in cov calculation
assert_allclose(res1.bse * dffac, res2.HC0_se, rtol=5e-6, atol=0)
assert_allclose(res1.bse * dffac, res2.HC0_se, rtol=0, atol=1e-7)
def test_other(self):
res1, res2 = self.res1, self.res2
class CheckGMM(object):
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
n = res1.model.exog.shape[0]
dffac = 1 #np.sqrt((n - 1.) / n) # currently different df in cov calculation
rtol, atol = self.bse_tol
assert_allclose(res1.bse * dffac, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse * dffac, res2.bse, rtol=0, atol=atol)
#skip temporarily
def _est_other(self):
res1, res2 = self.res1, self.res2
assert_allclose(res1.q, res2.Q, rtol=5e-6, atol=0)
assert_allclose(res1.jval, res2.J, rtol=5e-5, atol=0)
def test_hypothesis(self):
res1, res2 = self.res1, self.res2
restriction = np.eye(len(res1.params))
res_t = res1.t_test(restriction)
assert_allclose(res_t.tvalue, res1.tvalues, rtol=1e-12, atol=0)
assert_allclose(res_t.pvalue, res1.pvalues, rtol=1e-12, atol=0)
rtol, atol = self.bse_tol
assert_allclose(res_t.tvalue, res2.tvalues, rtol=rtol*10, atol=atol)
assert_allclose(res_t.pvalue, res2.pvalues, rtol=rtol*10, atol=atol)
res_f = res1.f_test(restriction[:-1]) # without constant
# comparison with fvalue is not possible, those are not defined
# assert_allclose(res_f.fvalue, res1.fvalue, rtol=1e-12, atol=0)
# assert_allclose(res_f.pvalue, res1.f_pvalue, rtol=1e-12, atol=0)
# assert_allclose(res_f.fvalue, res2.F, rtol=1e-10, atol=0)
# assert_allclose(res_f.pvalue, res2.Fp, rtol=1e-08, atol=0)
# Smoke test for Wald
res_wald = res1.wald_test(restriction[:-1])
class TestGMMSt1(CheckGMM):
@classmethod
def setup_class(self):
#self.bse_tol = [5e-7, 5e-7]
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=10, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
self.res1 = res10
from .results_gmm_griliches_iter import results
self.res2 = results
class TestGMMStTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False})
self.res1 = res10
from .results_gmm_griliches import results_twostep as results
self.res2 = results
class TestGMMStTwostepNO(CheckGMM):
#with Stata default `has_optimal_weights=False`
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [1e-6, 5e-5]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res10 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res10
from .results_gmm_griliches import results_twostep as results
self.res2 = results
class TestGMMStOnestep(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
self.params_tol = [5e-4, 5e-5]
self.bse_tol = [7e-3, 5e-4]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs',
optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_bse_other(self):
res1, res2 = self.res1, self.res2
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1._cov_params(has_optimal_weights=False))))
#weights=res1.weights))))
# TODO: doesn't look different
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
#nobs = instrument.shape[0]
#w0inv = np.dot(instrument.T, instrument) / nobs
q = self.res1.model.gmmobjective(self.res1.params, np.linalg.inv(self.res1.weights))
#assert_allclose(q, res2.Q, rtol=5e-6, atol=0)
class TestGMMStOnestepNO(CheckGMM):
    # matches Stata's defaults wargs={'centered':False}, has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
self.params_tol = [1e-5, 1e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
class TestGMMStOneiter(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-4, 5e-5]
self.bse_tol = [7e-3, 5e-4]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_bse_other(self):
res1, res2 = self.res1, self.res2
moms = res1.model.momcond(res1.params)
w = res1.model.calc_weightmatrix(moms)
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1._cov_params(has_optimal_weights=False,
weights=res1.weights))))
# TODO: doesn't look different
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
bse = np.sqrt(np.diag((res1._cov_params(has_optimal_weights=False,
#use_weights=True #weights=w
))))
#assert_allclose(res1.bse, res2.bse, rtol=5e-06, atol=0)
#This doesn't replicate Stata oneway either
nobs = instrument.shape[0]
w0inv = np.dot(instrument.T, instrument) / nobs
q = self.res1.model.gmmobjective(self.res1.params, w)#self.res1.weights)
#assert_allclose(q, res2.Q, rtol=5e-6, atol=0)
class TestGMMStOneiterNO(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [1e-5, 1e-6]
self.bse_tol = [5e-6, 5e-7]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
#------------ Crosscheck subclasses
class TestGMMStOneiterNO_Linear(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-9, 1e-9]
self.bse_tol = [5e-10, 1e-10]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.LinearIVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res3 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
class TestGMMStOneiterNO_Nonlinear(CheckGMM):
@classmethod
def setup_class(self):
# compare to Stata default options, onestep GMM
# this uses maxiter=1, one iteration in loop
self.params_tol = [5e-5, 5e-6]
self.bse_tol = [5e-6, 1e-1]
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
def func(params, exog):
return np.dot(exog, params)
mod = gmm.NonlinearIVGMM(endog, exog, instrument, func)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res3 = res
from .results_gmm_griliches import results_onestep as results
self.res2 = results
def test_score(self):
params = self.res1.params * 1.1
weights = self.res1.weights
sc1 = self.res1.model.score(params, weights)
sc2 = super(self.res1.model.__class__, self.res1.model).score(params,
weights)
assert_allclose(sc1, sc2, rtol=1e-6, atol=0)
assert_allclose(sc1, sc2, rtol=0, atol=1e-7)
# score at optimum
sc1 = self.res1.model.score(self.res1.params, weights)
assert_allclose(sc1, np.zeros(len(params)), rtol=0, atol=1e-8)
class TestGMMStOneiterOLS_Linear(CheckGMM):
@classmethod
def setup_class(self):
# replicating OLS by GMM - high agreement
self.params_tol = [1e-11, 1e-12]
self.bse_tol = [1e-12, 1e-12]
exog = exog_st # with const at end
res_ols = OLS(endog, exog).fit()
#Note: start is irrelevant but required
start = np.ones(len(res_ols.params))
nobs, k_instr = instrument.shape
w0inv = np.dot(exog.T, exog) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.LinearIVGMM(endog, exog, exog)
res = mod.fit(start, maxiter=0, inv_weights=w0inv,
#optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0},
optim_args={'disp': 0},
weights_method='iid',
wargs={'centered':False, 'ddof':'k_params'},
has_optimal_weights=True)
# fix use of t distribution see #2495 comment
res.use_t = True
res.df_resid = res.nobs - len(res.params)
self.res1 = res
#from .results_gmm_griliches import results_onestep as results
#self.res2 = results
self.res2 = res_ols
#------------------
class TestGMMSt2(object):
    # this looks like an old version, trying out different comparisons
    # of options with Stata
@classmethod
def setup_class(self):
# compare to Stata default options, iterative GMM
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
#w0 = np.linalg.inv(w0inv)
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=2, inv_weights=w0inv,
wargs={'ddof':0, 'centered':False},
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res1 = res
from .results_ivreg2_griliches import results_gmm2s_robust as results
self.res2 = results
# TODO: remove after testing, compare bse from 1 iteration
# see test_basic
mod = gmm.IVGMM(endog, exog, instrument)
res = mod.fit(start, maxiter=1, inv_weights=w0inv,
wargs={'ddof':0, 'centered':False},
optim_method='bfgs', optim_args={'gtol':1e-6, 'disp': 0})
self.res3 = res
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=5e-05, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=5e-06)
n = res1.model.exog.shape[0]
# TODO: check df correction np.sqrt(745./758 )*res1.bse matches better
dffact = np.sqrt(745. / 758 )
assert_allclose(res1.bse * dffact, res2.bse, rtol=5e-03, atol=0)
assert_allclose(res1.bse * dffact, res2.bse, rtol=0, atol=5e-03)
# try other versions for bse,
# TODO: next two produce the same as before (looks like)
bse = np.sqrt(np.diag((res1._cov_params(has_optimal_weights=True,
weights=res1.weights))))
assert_allclose(res1.bse, res2.bse, rtol=5e-01, atol=0)
bse = np.sqrt(np.diag((res1._cov_params(has_optimal_weights=True,
weights=res1.weights,
use_weights=True))))
assert_allclose(res1.bse, res2.bse, rtol=5e-02, atol=0)
# TODO: resolve this
# try bse from previous step, is closer to Stata
# guess: Stata ivreg2 doesn't calc for bse update after final iteration
# need better test case, bse difference is close to numerical optimization precision
assert_allclose(self.res3.bse, res2.bse, rtol=5e-05, atol=0)
assert_allclose(self.res3.bse, res2.bse, rtol=0, atol=5e-06)
        # TODO: tvalues are not available yet, no inheritance
#assert_allclose(res1.tvalues, res2.tvalues, rtol=5e-10, atol=0)
class CheckIV2SLS(object):
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
assert_allclose(res1.params, res2.params, rtol=1e-9, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=1e-10)
n = res1.model.exog.shape[0]
assert_allclose(res1.bse, res2.bse, rtol=1e-10, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=1e-11)
assert_allclose(res1.tvalues, res2.tvalues, rtol=5e-10, atol=0)
def test_other(self):
res1, res2 = self.res1, self.res2
assert_allclose(res1.rsquared, res2.r2, rtol=1e-7, atol=0)
assert_allclose(res1.rsquared_adj, res2.r2_a, rtol=1e-7, atol=0)
# TODO: why is fvalue different, IV2SLS uses inherited linear
assert_allclose(res1.fvalue, res2.F, rtol=1e-10, atol=0)
assert_allclose(res1.f_pvalue, res2.Fp, rtol=1e-8, atol=0)
assert_allclose(np.sqrt(res1.mse_resid), res2.rmse, rtol=1e-10, atol=0)
assert_allclose(res1.ssr, res2.rss, rtol=1e-10, atol=0)
assert_allclose(res1.uncentered_tss, res2.yy, rtol=1e-10, atol=0)
assert_allclose(res1.centered_tss, res2.yyc, rtol=1e-10, atol=0)
assert_allclose(res1.ess, res2.mss, rtol=1e-9, atol=0)
assert_equal(res1.df_model, res2.df_m)
assert_equal(res1.df_resid, res2.df_r)
# TODO: llf raise NotImplementedError
#assert_allclose(res1.llf, res2.ll, rtol=1e-10, atol=0)
def test_hypothesis(self):
res1, res2 = self.res1, self.res2
restriction = np.eye(len(res1.params))
res_t = res1.t_test(restriction)
assert_allclose(res_t.tvalue, res1.tvalues, rtol=1e-12, atol=0)
assert_allclose(res_t.pvalue, res1.pvalues, rtol=1e-12, atol=0)
res_f = res1.f_test(restriction[:-1]) # without constant
# TODO res1.fvalue problem, see issue #1104
assert_allclose(res_f.fvalue, res1.fvalue, rtol=1e-12, atol=0)
assert_allclose(res_f.pvalue, res1.f_pvalue, rtol=1e-12, atol=0)
assert_allclose(res_f.fvalue, res2.F, rtol=1e-10, atol=0)
assert_allclose(res_f.pvalue, res2.Fp, rtol=1e-08, atol=0)
def test_hausman(self):
res1, res2 = self.res1, self.res2
hausm = res1.spec_hausman()
# hausman uses se2 = ssr / nobs, no df correction
assert_allclose(hausm[0], res2.hausman['DWH'], rtol=1e-11, atol=0)
assert_allclose(hausm[1], res2.hausman['DWHp'], rtol=1e-10, atol=1e-25)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestIV2SLSSt1(CheckIV2SLS):
@classmethod
def setup_class(self):
exog = exog_st # with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
mod = gmm.IV2SLS(endog, exog, instrument)
res = mod.fit()
self.res1 = res
from .results_ivreg2_griliches import results_small as results
self.res2 = results
|
|
import numpy as N
import os, sys
try:
import pycuda.autoinit
GPU = True
except Exception:  # ImportError, or pycuda._driver.LogicError if an NVIDIA graphics card is not available
GPU = False
try:
import fftw3
FFTW = True
except ImportError:
FFTW = False
if FFTW:
MODULS = {N.float64: fftw3,
N.complex128: fftw3}
RTYPES = {N.float64: N.complex128}
CTYPES = {N.complex128: N.float64}
RTYPE = N.float64
CTYPE = N.complex128
try:
import fftw3f
MODULS[N.float32] = fftw3f
MODULS[N.complex64] = fftw3f
RTYPES[N.float32] = N.complex64
CTYPES[N.complex64] = N.float32
osbit = 32
if sys.platform == 'win32':
if sys.maxsize > (2**32):
osbit = 64
elif os.uname()[-1] != 'x86_64':
osbit = 64
if osbit == 64:
RTYPE = N.float32
CTYPE = N.complex64
except ImportError:
RTYPES[N.float32] = N.complex128
CTYPES[N.complex64] = N.float64
if hasattr(N, 'float128'):
try:
import fftw3l
MODULS[N.float128] = fftw3l
MODULS[N.complex256] = fftw3l
RTYPES[N.float128] = N.complex256
CTYPES[N.complex256] = N.float128
except ImportError:
RTYPES[N.float128] = N.complex128
CTYPES[N.complex256] = N.float64
else:
pass
#print 'WARNING: your numpy does not support fftw3 long-double'
try:
import multiprocessing as mp
ncpu = mp.cpu_count()
except ImportError:
ncpu = 1
def _rfft(a, nthreads=ncpu):
if a.shape[-1] % 2:
a = N.ascontiguousarray(a[...,:-1])
shape = list(a.shape)
shape[-1] = shape[-1] // 2 + 1
b = N.empty(shape, RTYPES[a.dtype.type])
return _fft3(a, b, nthreads=nthreads, direction='forward', realtypes='halfcomplex r2c')
def _irfft(a, normalize=True, nthreads=ncpu):
        a = a.copy()  # irfft of fftw3 messes up the input
shape = list(a.shape)
shape[-1] = (shape[-1] - 1) * 2
b = N.empty(shape, CTYPES[a.dtype.type])
if normalize:
vol = N.product(a.shape[:-1])
vol *= (a.shape[-1]-1)*2
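            # FFTW transforms are unnormalized: divide by the total number of
            # real-space samples, i.e. prod(shape[:-1]) * (last_complex_dim - 1) * 2.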
else:
vol = 1
return _fft3(a, b, nthreads=nthreads, direction='backward', realtypes='halfcomplex r2c') / vol
def _fft3(a, b, nthreads=ncpu, **kwds):
if 0 in a.shape:
raise ValueError('This array cannot be transformed, shape: %s' % str(a.shape))
modul = MODULS.get(a.dtype.type, fftw3)
plan = modul.Plan(a, b, nthreads=nthreads, **kwds)
plan()
return b
def fft(a, nthreads=ncpu):
b = N.empty_like(a) # assuming complex type
return _fft3(a, b, nthreads=nthreads, direction='forward')
def ifft(a, normalize=True, nthreads=ncpu):
        a = a.copy()  # irfft of fftw3 messes up the input
b = N.empty_like(a) # assuming complex type
if normalize:
vol = N.product(a.shape)
else:
vol = 1
return _fft3(a, b, nthreads=nthreads, direction='backward') / vol
else:
import numpy.fft as fftw
ncpu = 1
RTYPE = N.float32
RTYPES = (N.float32, N.float64)
CTYPE = N.complex64
CTYPES = (N.complex64, N.complex128)
def _rfft(a, nthreads=ncpu):
return fftw.rfftn(a)
def _irfft(a, normalize=True, nthreads=ncpu):
if normalize:
return fftw.irfftn(a)
else:
return fftw.irfft(a)
def fft(a, nthreads=ncpu):
return fftw.fftn(a)
def ifft(a, normalize=True, nthreads=ncpu):
if normalize:
return fftw.ifftn(a)
else:
return fftw.ifft(a)
if GPU:
# only rfft can be done with skcuda
# other GPU-based FFT is not compatible with ND arrays
from pycuda import gpuarray
from pycuda import driver
from skcuda import fft
G_RTYPES = {N.float32: N.complex64,
N.float64: N.complex128}
G_CTYPES = {N.complex64: N.float32,
N.complex128: N.float64}
G_RTYPE = N.float32
G_CTYPE = N.complex64
# child thread cannot use context from the main thread..., call this from the main thread
def detach_gpu_context():
pycuda.autoinit.context.pop() # empty context stack -> cannot pop non-current context
pycuda.autoinit.context.detach()
def is_memory_enough(a):
try:
rest, total = driver.mem_get_info()
except driver.LogicError: # child thread cannot use context from the main thread...
# the following does not work yet
from pycuda import tools
import skcuda
driver.init()
            context = tools.make_default_context()  # try to make a new context, but cannot deactivate the old context stack
device = context.get_device()
skcuda.misc.init_context(device)
rest, total = driver.mem_get_info()
if (sys.getsizeof(a) * 2) < rest:
return True
def rfft(a, nthreads=0):
if is_memory_enough(a):
arg = gpuarray.to_gpu(a)
shape = [s for s in a.shape]
shape[-1] = shape[-1]//2 + 1
ctype = G_RTYPES[a.dtype.type]
afg = gpuarray.empty(shape, ctype)
plan = fft.Plan(shape, a.dtype.type, ctype)
print(shape, a.dtype.type, ctype)
fft.fft(arg, afg, plan)
return afg.get()
else:
return _rfft(a)
def irfft(a, normalize=True, nthreads=0):
if is_memory_enough(a):
arg = gpuarray.to_gpu(a)
shape = [s for s in a.shape]
shape[-1] = (shape[-1]-1)*2
rtype = G_CTYPES[a.dtype.type]
afg = gpuarray.empty(shape, rtype)
plan = fft.Plan(shape, a.dtype.type, rtype)
fft.ifft(arg, afg, plan)
return afg.get()
else:
return _irfft(a)
else:
rfft = _rfft
irfft = _irfft
fft = fft
ifft = ifft
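if __name__ == '__main__':
    # Editorial self-test sketch (not part of the original module): a real array
    # should survive an rfft/irfft round trip.  The skcuda ifft used in the GPU
    # branch may not be normalized, so the check is only run for CPU backends.
    if not GPU:
        _a = N.random.random((4, 8)).astype(RTYPE)
        _b = irfft(rfft(_a))
        assert _a.shape == _b.shape
        assert N.allclose(_a, _b, atol=1e-4)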
|
|
import mock
import unittest
from vnc_api.vnc_api import *
from svc_monitor.virtual_machine_manager import VirtualMachineManager
from svc_monitor.config_db import *
import test_common_utils as test_utils
class VirtualMachineManagerTest(unittest.TestCase):
def setUp(self):
VirtualMachineSM._cassandra = mock.MagicMock()
VirtualMachineSM._cassandra.read = test_utils.vm_db_read
VirtualMachineInterfaceSM._cassandra = mock.MagicMock()
VirtualMachineInterfaceSM._cassandra.read = test_utils.vmi_db_read
InstanceIpSM._cassandra = mock.MagicMock()
InstanceIpSM._cassandra.read = test_utils.iip_db_read
InterfaceRouteTableSM._cassandra = mock.MagicMock()
InterfaceRouteTableSM._cassandra.read = test_utils.irt_db_read
self.mocked_vnc = mock.MagicMock()
self.mocked_vnc.fq_name_to_id = test_utils.get_vn_id_for_fq_name
self.mocked_vnc.virtual_network_create = test_utils.vn_create
self.mocked_vnc.virtual_machine_interface_create = test_utils.vmi_create
self.mocked_vnc.instance_ip_create = test_utils.iip_create
self.nova_mock = mock.MagicMock()
self.mocked_db = mock.MagicMock()
self.mocked_args = mock.MagicMock()
self.mocked_args.availability_zone = 'default-availability-zone'
self.log_mock = mock.MagicMock()
self.vm_manager = VirtualMachineManager(
db=self.mocked_db, logger=self.log_mock,
vnc_lib=self.mocked_vnc, vrouter_scheduler=mock.MagicMock(),
nova_client=self.nova_mock, args=self.mocked_args,
agent_manager=mock.MagicMock())
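        # Editorial note: every external dependency (Cassandra reads, the VNC
        # API, Nova, the scheduler, DB and logger) is a MagicMock, so the tests
        # below only assert on how VirtualMachineManager drives these mocks.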
def tearDown(self):
ServiceTemplateSM.reset()
ServiceInstanceSM.reset()
InstanceIpSM.reset()
VirtualMachineInterfaceSM.reset()
VirtualMachineSM.reset()
del InterfaceRouteTableSM._cassandra
del VirtualMachineSM._cassandra
def test_virtual_machine_create(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(1))
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(2))
        self.assertEqual(si.availability_zone, 'default-availability-zone')
def test_virtual_machine_delete(self):
vm = test_utils.create_test_virtual_machine('fake-vm-uuid')
self.vm_manager.delete_service(vm)
def test_missing_image_in_template(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
st.params['image_name'] = None
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_called_with("Image not present in %s" % ((':').join(st.fq_name)))
def test_missing_image_in_nova(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'images' and oper == 'find':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_called_with("Image not found %s" % si.image)
def test_nova_vm_create_fail(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_any_call(test_utils.AnyStringWith('Nova vm create failed'))
def test_missing_flavor_in_template(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'flavors' and oper == 'find':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
st.params['flavor'] = None
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_called_with(test_utils.AnyStringWith("Flavor not found"))
def test_availability_zone_setting(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
st.params['availability_zone_enable'] = True
si.params['availability_zone'] = 'test-availability-zone'
self.vm_manager.create_service(st, si)
        self.assertEqual(si.availability_zone, 'test-availability-zone')
def test_network_config_validation(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
st.params['interface_type'] = []
self.vm_manager.create_service(st, si)
self.log_mock.log_notice.assert_called_with("Interface list empty for ST %s SI %s" %
((':').join(st.fq_name), (':').join(si.fq_name)))
def test_virtual_machine_exists(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer(kwargs['name'], kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.mocked_vnc.virtual_machine_create = test_utils.vm_create
self.vm_manager.create_service(st, si)
self.log_mock.log_info.assert_any_call(test_utils.AnyStringWith('Launching VM :'))
self.log_mock.log_info.assert_any_call(test_utils.AnyStringWith('Created VM :'))
self.log_mock.log_info.assert_any_call(test_utils.AnyStringWith(si.name))
self.log_mock.reset_mock()
self.vm_manager.create_service(st, si)
        self.assertEqual(self.log_mock.log_info.call_count, 1)
def test_virtual_machine_static_routes(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True, True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(1))
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(2))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 KenV99
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time
import re
import xbmc
debug = True
remote = False
if debug:
    import sys  # needed by both branches below
    if remote:
        sys.path.append(r'C:\\Users\\Ken User\\AppData\\Roaming\\XBMC\\addons\\script.ambibox\\resources\\lib\\'
                        r'pycharm-debug.py3k\\')
        import pydevd
        pydevd.settrace('192.168.1.103', port=51234, stdoutToServer=True, stderrToServer=True)
    else:
        sys.path.append(r'C:\Program Files (x86)\JetBrains\PyCharm 3.1.3\pycharm-debug-py3k.egg')
        import pydevd
        pydevd.settrace('localhost', port=51234, stdoutToServer=True, stderrToServer=True)
def round_down_to_half_hour(xtime):
"""
Returns a string with the time rounded down to the nearest half-hour in format h:mm am
Example:
now_time = time.localtime()
r_time = round_down_to_half_hour(now_time)
> 7:30 am
@param xtime: the time to be converted in either string YYYYmmddHHMMSS format or a time.struct_time typed obj
@type xtime: str
@type xtime: time.struct_time
@rtype: str
"""
    if type(xtime) is str:
        try:
            xxtime = time.strptime(xtime, '%Y%m%d%H%M%S')
        except ValueError:
            return None
    elif type(xtime) is time.struct_time:
        xxtime = xtime
    else:
        return None
    # round down: minutes 0-29 -> :00, minutes 30-59 -> :30
    if xxtime.tm_min < 30:
        newmin = ':00'
    else:
        newmin = ':30'
xformat = xbmc.getRegion('time').replace(':%S', '').replace('%H%H', '%H')
tmp = time.strftime(xformat, xxtime).lower()
ret = re.sub(':..', newmin, tmp)
return ret
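# Editorial usage sketch (assumes a running Kodi/XBMC environment so that
# xbmc.getRegion() works; the exact output depends on the configured region):
#
#     round_down_to_half_hour('20140101074500')   # e.g. -> '7:30 am'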
##########################################################################################
__killme__ = False
import urllib2
import types
def download_file(url, obj2rcv_status, int_percentofjob, int_block_size):
"""
Downloads data/file from url and returns it as a string
Sends status updates as a percent done to obj2rcv_status.setstatus() - obj must have this fxn
Needs to be run at a separate thread/process if using the global flag __killme__ to abort
@param url: the url to download from - no error checking done
@type url: str
@param obj2rcv_status: the object to receive status updates, set to None if not using
@type obj2rcv_status: function
@param int_percentofjob: the percent to show when 100% job done, set to 100 if this is complete job; useful if
the download is only one part of a larger job
@type int_percentofjob: int
@param int_block_size: the block size to use during download ie. 2048, 4096, 8192
@type int_block_size: int
@rtype: str
"""
global __killme__
try:
data = ''
u = urllib2.urlopen(url)
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
file_size_dl = 0
block_sz = int_block_size
        while not __killme__:
mbuffer = u.read(block_sz)
if not mbuffer:
break
file_size_dl += len(mbuffer)
data += mbuffer
state = int(file_size_dl * float(int_percentofjob) / file_size)
if obj2rcv_status is not None:
obj2rcv_status.setstatus(state)
else:
if __killme__:
raise AbortDownload('downloading')
del u
return data
except AbortDownload, e:
__killme__ = False
if e.value == 'downloading':
try:
if u is not None:
del u
return None
except:
return None
class AbortDownload(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
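# Editorial usage sketch (the URL and receiver class below are illustrative
# only). download_file() is meant to run in a worker thread so the main thread
# can set the module-global __killme__ = True to abort the transfer:
#
#     class StatusReceiver(object):
#         def setstatus(self, pct):
#             xbmc.log('download %d%% done' % pct)
#
#     data = download_file('http://example.com/file.bin', StatusReceiver(), 100, 8192)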
##########################################################################################
import threading
class GenericThread(threading.Thread):
def __init__(self, xtarget, threadname):
threading.Thread.__init__(self, name=threadname)
self.xtarget = xtarget
def start(self):
threading.Thread.start(self)
def run(self):
self.xtarget()
def stop(self):
self.join(2000)
########################################################################################
import math
class Stream_stats():
"""
Uses numerical methods to calculate ongoing mean and variance without holding large sums in memory
Con is for small sample sizes, there may be some inaccuracy
After creating an instance, call update() with the varaible you want stats on
Reset variance (reset_var) can be called, keeping last mean as starting point
In addition, init_mean can be used if a previous mean is known
pdf returns the probabilty density function at x given the current mean and variance
cdf returns the cumulative distribution function
"""
def __init__(self):
self.last_mean = None
self.last_var = 0.0
self.n = 0
def update(self, x):
self.n += 1
if self.last_mean is None:
self.last_mean = x
alpha = 1.0/self.n
m = self.last_mean + (x - self.last_mean)/self.n
v = ((self.n - 1) * self.last_var + (x - self.last_mean) * (x - m)) * alpha
self.last_mean = m
self.last_var = v
def mean(self):
return self.last_mean
def var(self):
return self.last_var
def sd(self):
return math.sqrt(self.last_var)
def count(self):
return self.n
def reset_var(self):
self.last_var = 0.0
self.n = 0
def init_mean(self, m):
self.last_mean = m
def pdf(self, x):
denom = math.sqrt(2*math.pi*self.last_var)
num = math.exp(-(float(x)-float(self.last_mean))**2/(2*self.last_var))
return num/denom
def cdf(self, x):
return 0.5 * (1 + math.erf((x - self.last_mean)/math.sqrt(2 * self.last_var)))
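# Editorial usage sketch (not part of the original module): the streaming mean
# and population variance should agree with a direct computation, e.g.
#
#     ss = Stream_stats()
#     for x in [1.0, 2.0, 4.0, 8.0]:
#         ss.update(x)
#     # ss.mean() ~= 3.75, ss.var() ~= 7.1875 (population variance)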
|
|
"""`PulseTrain`, `BiphasicPulseTrain`, `AsymmetricBiphasicPulseTrain`"""
import numpy as np
import logging
# DT: Sampling time step (ms); defines the duration of the signal edge
# transitions:
from .base import Stimulus
from .pulses import BiphasicPulse, AsymmetricBiphasicPulse, MonophasicPulse
from ..utils.constants import DT
class PulseTrain(Stimulus):
"""Generic pulse train
Can be used to concatenate single pulses into a pulse train.
.. seealso ::
* :py:class:`~pulse2percept.stimuli.BiphasicPulseTrain`
* :py:class:`~pulse2percept.stimuli.AsymmetricBiphasicPulseTrain`
.. versionadded:: 0.6
Parameters
----------
freq : float
Pulse train frequency (Hz).
pulse : :py:class:`~pulse2percept.stimuli.Stimulus`
A Stimulus object containing a single pulse that will be concatenated.
n_pulses : int
Number of pulses requested in the pulse train. If None, the entire
stimulation window (``stim_dur``) is filled.
stim_dur : float, optional
Total stimulus duration (ms). The pulse train will be trimmed to make
the stimulus last ``stim_dur`` ms overall.
electrode : { int | string }, optional
Optionally, you can provide your own electrode name.
metadata : dict
A dictionary of meta-data
Notes
-----
    * If the pulse train frequency does not exactly divide ``stim_dur``, the
      number of pulses will be rounded down. For example, when trying to fit
      an 11 Hz pulse train into a 100 ms window, only 1 pulse will fit.
"""
def __init__(self, freq, pulse, n_pulses=None, stim_dur=1000.0,
electrode=None, metadata=None):
if not isinstance(pulse, Stimulus):
raise TypeError(f"'pulse' must be a Stimulus object, not "
f"{type(pulse)}.")
if pulse.shape[0] == 0:
raise ValueError(f"'pulse' has invalid shape "
f"({pulse.shape[0]}, {pulse.shape[1]}).")
if pulse.time is None:
raise ValueError("'pulse' does not have a time component.")
# How many pulses fit into stim dur:
n_max_pulses = freq * stim_dur / 1000.0
# The requested number of pulses cannot be greater than max pulses:
if n_pulses is not None:
n_pulses = int(n_pulses)
if n_pulses > n_max_pulses:
raise ValueError(f"stim_dur={stim_dur:.2f} cannot fit more than "
f"{n_max_pulses} pulses.")
else:
# `freq` might not perfectly divide `stim_dur`, so we will create
# one extra pulse and trim to the right length:
n_pulses = int(np.ceil(n_max_pulses))
# 0 Hz is allowed:
if n_pulses <= 0:
time = np.array([0, stim_dur], dtype=np.float32)
data = np.array([[0, 0]], dtype=np.float32)
else:
# Window duration is the inverse of pulse train frequency:
window_dur = 1000.0 / freq
if pulse.time[-1] > window_dur:
raise ValueError(f"Pulse (dur={pulse.time[-1]:.2f} ms) does not fit into "
f"pulse train window (dur={window_dur:.2f} "
f"ms)")
shift = np.maximum(0, window_dur - pulse.time[-1])
pt = pulse
for i in range(1, n_pulses):
pt = pt.append(pulse >> shift)
data = pt.data
time = pt.time
if time[-1] > stim_dur + DT:
# If stimulus is longer than the requested `stim_dur`, trim it.
# Make sure to interpolate the end point:
last_col = [np.interp(stim_dur, time, row) for row in data]
last_col = np.array(last_col).reshape((-1, 1))
t_idx = time < stim_dur
data = np.hstack((data[:, t_idx], last_col))
time = np.append(time[t_idx], stim_dur)
elif time[-1] < stim_dur - DT:
# If stimulus is shorter than the requested `stim_dur`, add a zero:
data = np.hstack((data, np.zeros((pulse.data.shape[0], 1))))
time = np.append(time, stim_dur)
super().__init__(data, time=time, electrodes=electrode, metadata=None,
compress=False)
self.freq = freq
self.pulse_type = pulse.__class__.__name__
self.metadata = {'user': metadata}
def _pprint_params(self):
"""Return a dict of class arguments to pretty-print"""
params = super(PulseTrain, self)._pprint_params()
params.update({'freq': self.freq,
'pulse_type': self.pulse_type})
return params
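# Editorial usage sketch (a minimal example, not part of the original module):
# a PulseTrain is built by tiling a single pulse at the requested frequency,
# e.g. with the BiphasicPulse class imported above:
#
#     pulse = BiphasicPulse(-20, 0.45, interphase_dur=0.45)  # 20 uA, 0.45 ms phases
#     pt = PulseTrain(freq=20, pulse=pulse, stim_dur=500)    # 10 pulses in 500 ms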
class BiphasicPulseTrain(Stimulus):
"""Symmetric biphasic pulse train
A train of symmetric biphasic pulses.
.. versionadded:: 0.6
Parameters
----------
freq : float
Pulse train frequency (Hz).
amp : float
Current amplitude (uA). Negative currents: cathodic, positive: anodic.
The sign will be converted automatically depending on
``cathodic_first``.
phase_dur : float
Duration (ms) of the cathodic/anodic phase.
interphase_dur : float, optional, default: 0
Duration (ms) of the gap between cathodic and anodic phases.
delay_dur : float
Delay duration (ms). Zeros will be inserted at the beginning of the
stimulus to deliver the first pulse phase after ``delay_dur`` ms.
n_pulses : int
Number of pulses requested in the pulse train. If None, the entire
stimulation window (``stim_dur``) is filled.
stim_dur : float, optional, default: 1000 ms
Total stimulus duration (ms). The pulse train will be trimmed to make
the stimulus last ``stim_dur`` ms overall.
cathodic_first : bool, optional, default: True
If True, will deliver the cathodic pulse phase before the anodic one.
electrode : { int | string }, optional, default: 0
Optionally, you can provide your own electrode name.
metadata : dict
A dictionary of meta-data
Notes
-----
* Each cycle ("window") of the pulse train consists of a symmetric
biphasic pulse, created with
:py:class:`~pulse2percept.stimuli.BiphasicPulse`.
* The order and sign of the two phases (cathodic/anodic) of each pulse
in the train is automatically adjusted depending on the
``cathodic_first`` flag.
* A pulse train will be considered "charge-balanced" if its net current is
smaller than 10 picoamps.
"""
def __init__(self, freq, amp, phase_dur, interphase_dur=0, delay_dur=0,
n_pulses=None, stim_dur=1000.0, cathodic_first=True,
electrode=None, metadata=None):
# Create the individual pulse:
pulse = BiphasicPulse(amp, phase_dur, delay_dur=delay_dur,
interphase_dur=interphase_dur,
cathodic_first=cathodic_first,
electrode=electrode)
# Concatenate the pulses:
pt = PulseTrain(freq, pulse, n_pulses=n_pulses, stim_dur=stim_dur)
super().__init__(pt.data, time=pt.time, compress=False)
self.freq = freq
self.cathodic_first = cathodic_first
# Store metadata for BiphasicAxonMapModel
self.metadata = {'freq': freq,
'amp': amp,
'phase_dur': phase_dur,
'delay_dur': delay_dur,
'user': metadata}
def _pprint_params(self):
"""Return a dict of class arguments to pretty-print"""
params = super(BiphasicPulseTrain, self)._pprint_params()
params.update({'cathodic_first': self.cathodic_first,
'freq': self.freq})
return params
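# Editorial usage sketch (not part of the original module): the convenience
# class above builds the biphasic pulse internally, e.g.
#
#     pt = BiphasicPulseTrain(freq=20, amp=-20, phase_dur=0.45, stim_dur=500)
#
# which is equivalent to constructing a BiphasicPulse and wrapping it in a
# PulseTrain as sketched after the PulseTrain class above.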
class AsymmetricBiphasicPulseTrain(Stimulus):
"""Asymmetric biphasic pulse
A simple stimulus consisting of a single biphasic pulse: a cathodic and an
anodic phase, optionally separated by an interphase gap.
The two pulse phases can have different amplitudes and duration
("asymmetric").
The order of the two phases is given by the ``cathodic_first`` flag.
.. versionadded:: 0.6
Parameters
----------
freq : float
Pulse train frequency (Hz).
amp1, amp2 : float
Current amplitude (uA) of the first and second pulse phases.
Negative currents: cathodic, positive: anodic.
The signs will be converted automatically depending on
``cathodic_first``.
phase_dur1, phase_dur2 : float
Duration (ms) of the first and second pulse phases.
interphase_dur : float, optional, default: 0
Duration (ms) of the gap between cathodic and anodic phases.
delay_dur : float
Delay duration (ms). Zeros will be inserted at the beginning of the
stimulus to deliver the first pulse phase after ``delay_dur`` ms.
n_pulses : int
Number of pulses requested in the pulse train. If None, the entire
stimulation window (``stim_dur``) is filled.
stim_dur : float, optional, default: 1000 ms
Total stimulus duration (ms). Zeros will be inserted at the end of the
        stimulus to make the stimulus last ``stim_dur`` ms overall.
cathodic_first : bool, optional, default: True
If True, will deliver the cathodic pulse phase before the anodic one.
electrode : { int | string }, optional, default: 0
Optionally, you can provide your own electrode name.
metadata : dict
A dictionary of meta-data
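    Examples
    --------
    A minimal usage sketch (values are illustrative; the import path assumes
    this module ships as ``pulse2percept.stimuli``):
    >>> from pulse2percept.stimuli import AsymmetricBiphasicPulseTrain
    >>> # 20 Hz train: short/strong first phase, long/weak second phase
    >>> # (40 uA x 0.2 ms balances 10 uA x 0.8 ms):
    >>> pt = AsymmetricBiphasicPulseTrain(20, 40, 10, 0.2, 0.8, stim_dur=500)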
"""
def __init__(self, freq, amp1, amp2, phase_dur1, phase_dur2,
interphase_dur=0, delay_dur=0, n_pulses=None, stim_dur=1000.0,
cathodic_first=True, electrode=None, metadata=None):
# Create the individual pulse:
pulse = AsymmetricBiphasicPulse(amp1, amp2, phase_dur1, phase_dur2,
delay_dur=delay_dur,
interphase_dur=interphase_dur,
cathodic_first=cathodic_first,
electrode=electrode)
# Concatenate the pulses:
pt = PulseTrain(freq, pulse, n_pulses=n_pulses, stim_dur=stim_dur)
super().__init__(pt.data, time=pt.time, compress=False)
self.freq = freq
self.cathodic_first = cathodic_first
self.metadata = {'user': metadata}
def _pprint_params(self):
"""Return a dict of class arguments to pretty-print"""
params = super(AsymmetricBiphasicPulseTrain, self)._pprint_params()
params.update({'cathodic_first': self.cathodic_first,
'freq': self.freq})
return params
class BiphasicTripletTrain(Stimulus):
"""Biphasic pulse triplets
A train of symmetric biphasic pulse triplets.
.. versionadded:: 0.6
Parameters
----------
freq : float
Pulse train frequency (Hz).
amp : float
Current amplitude (uA). Negative currents: cathodic, positive: anodic.
The sign will be converted automatically depending on
``cathodic_first``.
phase_dur : float
Duration (ms) of the cathodic/anodic phase.
interphase_dur : float, optional, default: 0
Duration (ms) of the gap between cathodic and anodic phases.
delay_dur : float
Delay duration (ms). Zeros will be inserted at the beginning of the
stimulus to deliver the first pulse phase after ``delay_dur`` ms.
interpulse_dur : float, optional, default: 0
        Delay duration (ms) between each biphasic pulse within the train. Note
        that this delay is also applied after the third biphasic pulse.
n_pulses : int
Number of pulses requested in the pulse train. If None, the entire
stimulation window (``stim_dur``) is filled.
stim_dur : float, optional, default: 1000 ms
Total stimulus duration (ms). The pulse train will be trimmed to make
the stimulus last ``stim_dur`` ms overall.
cathodic_first : bool, optional, default: True
If True, will deliver the cathodic pulse phase before the anodic one.
electrode : { int | string }, optional, default: 0
Optionally, you can provide your own electrode name.
metadata : dict
A dictionary of meta-data
Notes
-----
* Each cycle ("window") of the pulse train consists of three biphasic
pulses, created with
:py:class:`~pulse2percept.stimuli.BiphasicPulse`.
* The order and sign of the two phases (cathodic/anodic) of each pulse
in the train is automatically adjusted depending on the
``cathodic_first`` flag.
* A pulse train will be considered "charge-balanced" if its net current is
smaller than 10 picoamps.
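    Examples
    --------
    A minimal usage sketch (values are illustrative; the import path assumes
    this module ships as ``pulse2percept.stimuli``):
    >>> from pulse2percept.stimuli import BiphasicTripletTrain
    >>> # 20 Hz train of pulse triplets with 1 ms between pulses in a triplet:
    >>> pt = BiphasicTripletTrain(20, 30, 0.45, interpulse_dur=1, stim_dur=500)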
"""
def __init__(self, freq, amp, phase_dur, interphase_dur=0, interpulse_dur=0,
delay_dur=0, n_pulses=None, stim_dur=1000.0, cathodic_first=True,
electrode=None, metadata=None):
# Create the pulse:
pulse = BiphasicPulse(amp, phase_dur, interphase_dur=interphase_dur,
delay_dur=delay_dur,
cathodic_first=cathodic_first,
electrode=electrode)
if interpulse_dur != 0:
# Create an interpulse 'delay' pulse:
delay_pulse = MonophasicPulse(0, interpulse_dur)
pulse = pulse.append(delay_pulse)
# Create the pulse triplet:
triplet = pulse.append(pulse).append(pulse)
# Create the triplet train:
pt = PulseTrain(freq, triplet, n_pulses=n_pulses, stim_dur=stim_dur)
# Set up the Stimulus object through the constructor:
super(BiphasicTripletTrain, self).__init__(pt.data, time=pt.time,
compress=False)
self.freq = freq
self.cathodic_first = cathodic_first
self.metadata = {'user': metadata}
def _pprint_params(self):
"""Return a dict of class arguments to pretty-print"""
params = super(BiphasicTripletTrain, self)._pprint_params()
params.update({'cathodic_first': self.cathodic_first,
'freq': self.freq})
return params
|
|
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import datetime
import decimal
from gluon.storage import Storage
from gluon.html import TAG, XmlComponent, xmlescape
from gluon.languages import lazyT
import gluon.contrib.rss2 as rss2
try:
# try external module
import simplejson as json_parser
except ImportError:
try:
# try stdlib (Python >= 2.6)
import json as json_parser
except:
# fallback to pure-Python module
import gluon.contrib.simplejson as json_parser
# simplejson >= 2.1.3 needs use_decimal = False
# to stringify decimals
decimal_false_option = json_parser.__version__.split('.') >= ['2', '1', '3']
have_yaml = True
try:
import yaml as yamlib
except ImportError:
have_yaml = False
def cast_keys(o, cast=str, encoding="utf-8"):
"""
Builds a new object with <cast> type keys.
Use this function if you are in Python < 2.6.5
This avoids syntax errors when unpacking dictionary arguments.
Args:
o: is the object input
cast: (defaults to str) is an object type or function
which supports conversion such as:
converted = cast(o)
encoding: (defaults to utf-8) is the encoding for unicode
keys. This is not used for custom cast functions
"""
if isinstance(o, (dict, Storage)):
if isinstance(o, dict):
newobj = dict()
else:
newobj = Storage()
for k, v in o.items():
if (cast == str) and isinstance(k, unicode):
key = k.encode(encoding)
else:
key = cast(k)
newobj[key] = cast_keys(v, cast=cast, encoding=encoding)
elif isinstance(o, (tuple, set, list)):
newobj = []
for item in o:
newobj.append(cast_keys(item, cast=cast, encoding=encoding))
if isinstance(o, tuple):
newobj = tuple(newobj)
elif isinstance(o, set):
newobj = set(newobj)
else:
# no string cast (unknown object)
newobj = o
return newobj
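# Example (a minimal sketch; the dictionary below is illustrative):
#
#     >>> cast_keys({u'a': {u'b': 2}})
#     {'a': {'b': 2}}
#
# On Python < 2.6.5 this makes the result safe to unpack with **kwargs,
# which rejects unicode keys.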
def loads_json(o, unicode_keys=True, **kwargs):
# deserialize a json string
result = json_parser.loads(o, **kwargs)
if not unicode_keys:
# filter non-str keys in dictionary objects
result = cast_keys(result,
encoding=kwargs.get("encoding", "utf-8"))
return result
def custom_json(o):
if hasattr(o, 'custom_json') and callable(o.custom_json):
return o.custom_json()
if isinstance(o, (datetime.date,
datetime.datetime,
datetime.time)):
return o.isoformat()[:19].replace('T', ' ')
elif isinstance(o, (int, long)):
return int(o)
elif isinstance(o, decimal.Decimal):
return str(o)
elif isinstance(o, lazyT):
return str(o)
elif isinstance(o, XmlComponent):
return str(o)
elif isinstance(o, set):
return list(o)
elif hasattr(o, 'as_list') and callable(o.as_list):
return o.as_list()
elif hasattr(o, 'as_dict') and callable(o.as_dict):
return o.as_dict()
else:
raise TypeError(repr(o) + " is not JSON serializable")
def xml_rec(value, key, quote=True):
if hasattr(value, 'custom_xml') and callable(value.custom_xml):
return value.custom_xml()
elif isinstance(value, (dict, Storage)):
return TAG[key](*[TAG[k](xml_rec(v, '', quote))
for k, v in value.items()])
elif isinstance(value, list):
return TAG[key](*[TAG.item(xml_rec(item, '', quote)) for item in value])
elif hasattr(value, 'as_list') and callable(value.as_list):
return str(xml_rec(value.as_list(), '', quote))
elif hasattr(value, 'as_dict') and callable(value.as_dict):
return str(xml_rec(value.as_dict(), '', quote))
else:
return xmlescape(value, quote)
def xml(value, encoding='UTF-8', key='document', quote=True):
return ('<?xml version="1.0" encoding="%s"?>' % encoding) + str(xml_rec(value, key, quote))
def json(value, default=custom_json):
if decimal_false_option:
value = json_parser.dumps(value, default=default, use_decimal=False)
else:
value = json_parser.dumps(value, default=default)
# replace JavaScript incompatible spacing
# http://timelessrepo.com/json-isnt-a-javascript-subset
    return value.replace(ur'\u2028', '\\u2028').replace(ur'\u2029', '\\u2029')
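# Example (a minimal sketch; the value below is illustrative):
#
#     >>> json({'when': datetime.datetime(2015, 1, 31, 12, 30, 0)})
#     '{"when": "2015-01-31 12:30:00"}'
#
# Dates and datetimes are flattened by custom_json() into "YYYY-MM-DD HH:MM:SS"
# strings, Decimals and lazyT translations become plain strings, and the
# U+2028/U+2029 line separators are escaped so the output is safe to embed in
# JavaScript.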
def csv(value):
return ''
def ics(events, title=None, link=None, timeshift=0, calname=True,
**ignored):
title = title or '(unknown)'
if link and not callable(link):
link = lambda item, prefix=link: prefix.replace(
'[id]', str(item['id']))
s = 'BEGIN:VCALENDAR'
s += '\nVERSION:2.0'
    if calname is not False:
s += '\nX-WR-CALNAME:%s' % (calname or title)
s += '\nSUMMARY:%s' % title
s += '\nPRODID:Generated by web2py'
s += '\nCALSCALE:GREGORIAN'
s += '\nMETHOD:PUBLISH'
for item in events:
s += '\nBEGIN:VEVENT'
s += '\nUID:%s' % item['id']
if link:
s += '\nURL:%s' % link(item)
shift = datetime.timedelta(seconds=3600 * timeshift)
start = item['start_datetime'] + shift
stop = item['stop_datetime'] + shift
s += '\nDTSTART:%s' % start.strftime('%Y%m%dT%H%M%S')
s += '\nDTEND:%s' % stop.strftime('%Y%m%dT%H%M%S')
s += '\nSUMMARY:%s' % item['title']
s += '\nEND:VEVENT'
s += '\nEND:VCALENDAR'
return s
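# Example (a minimal sketch; the event dictionary is illustrative):
#
#     >>> events = [{'id': 1, 'title': 'standup',
#     ...            'start_datetime': datetime.datetime(2015, 1, 31, 9, 0),
#     ...            'stop_datetime': datetime.datetime(2015, 1, 31, 9, 15)}]
#     >>> print ics(events, title='team calendar')
#     BEGIN:VCALENDAR
#     ...
#     END:VCALENDAR
#
# Each event needs at least 'id', 'title', 'start_datetime' and
# 'stop_datetime'; 'timeshift' shifts all event times by that many hours.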
def safe_encode(text):
if not isinstance(text, (str, unicode)):
text = str(text)
try:
text = text.encode('utf8','replace')
except ValueError:
new_text = ''
for c in text:
try:
new_text += c.encode('utf8')
except:
new_text += '?'
text = new_text
return text
def rss(feed):
    if 'entries' not in feed and 'items' in feed:
feed['entries'] = feed['items']
def safestr(obj, key, default=''):
return safe_encode(obj.get(key,''))
now = datetime.datetime.now()
rss = rss2.RSS2(title=safestr(feed,'title'),
link=safestr(feed,'link'),
description=safestr(feed,'description'),
lastBuildDate=feed.get('created_on', now),
items=[rss2.RSSItem(
title=safestr(entry,'title','(notitle)'),
link=safestr(entry,'link'),
description=safestr(entry,'description'),
pubDate=entry.get('created_on', now)
) for entry in feed.get('entries', [])])
return rss.to_xml(encoding='utf8')
def yaml(data):
if have_yaml:
return yamlib.dump(data)
else:
raise ImportError("No YAML serializer available")
def loads_yaml(data):
if have_yaml:
return yamlib.load(data)
else:
raise ImportError("No YAML serializer available")
|
|
# ccm clusters
from six import print_, iteritems
from six.moves import xrange
import yaml
import os
import subprocess
import shutil
import time
from ccmlib import common, repository
from ccmlib.node import Node, NodeError
from ccmlib.bulkloader import BulkLoader
class Cluster(object):
def __init__(self, path, name, partitioner=None, install_dir=None, create_directory=True, version=None, verbose=False, **kwargs):
self.name = name
self.nodes = {}
self.seeds = []
self.partitioner = partitioner
self._config_options = {}
self._dse_config_options = {}
self.__log_level = "INFO"
self.__path = path
self.__version = None
self.use_vnodes = False
# Classes that are to follow the respective logging level
self._debug = []
self._trace = []
if self.name.lower() == "current":
raise RuntimeError("Cannot name a cluster 'current'.")
        # This is incredibly important for backwards compatibility.
if 'cassandra_version' in kwargs:
version = kwargs['cassandra_version']
if 'cassandra_dir' in kwargs:
install_dir = kwargs['cassandra_dir']
if create_directory:
# we create the dir before potentially downloading to throw an error sooner if need be
os.mkdir(self.get_path())
try:
if version is None:
# at this point, install_dir should always not be None, but
# we keep this for backward compatibility (in loading old cluster)
if install_dir is not None:
if common.is_win():
self.__install_dir = install_dir
else:
self.__install_dir = os.path.abspath(install_dir)
self.__version = self.__get_version_from_build()
else:
dir, v = self.load_from_repository(version, verbose)
self.__install_dir = dir
self.__version = v if v is not None else self.__get_version_from_build()
if create_directory:
common.validate_install_dir(self.__install_dir)
self._update_config()
except:
if create_directory:
common.rmdirs(self.get_path())
raise
def load_from_repository(self, version, verbose):
return repository.setup(version, verbose)
def set_partitioner(self, partitioner):
self.partitioner = partitioner
self._update_config()
return self
def set_install_dir(self, install_dir=None, version=None, verbose=False):
if version is None:
self.__install_dir = install_dir
common.validate_install_dir(install_dir)
self.__version = self.__get_version_from_build()
else:
dir, v = repository.setup(version, verbose)
self.__install_dir = dir
self.__version = v if v is not None else self.__get_version_from_build()
self._update_config()
for node in list(self.nodes.values()):
node.import_config_files()
# if any nodes have a data center, let's update the topology
if any( [node.data_center for node in self.nodes.values()] ):
self.__update_topology_files()
return self
def get_install_dir(self):
common.validate_install_dir(self.__install_dir)
return self.__install_dir
def hasOpscenter(self):
return False
def nodelist(self):
return [ self.nodes[name] for name in sorted(self.nodes.keys()) ]
def version(self):
return self.__version
def cassandra_version(self):
return self.version()
def add(self, node, is_seed, data_center=None):
if node.name in self.nodes:
raise common.ArgumentError('Cannot create existing node %s' % node.name)
self.nodes[node.name] = node
if is_seed:
self.seeds.append(node)
self._update_config()
node.data_center = data_center
node.set_log_level(self.__log_level)
for debug_class in self._debug:
node.set_log_level("DEBUG", debug_class)
for trace_class in self._trace:
node.set_log_level("TRACE", trace_class)
if data_center is not None:
self.__update_topology_files()
node._save()
return self
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.', ipformat=None):
node_count = nodes
dcs = []
self.use_vnodes = use_vnodes
if isinstance(nodes, list):
self.set_configuration_options(values={'endpoint_snitch' : 'org.apache.cassandra.locator.PropertyFileSnitch'})
node_count = 0
i = 0
for c in nodes:
i = i + 1
node_count = node_count + c
for x in xrange(0, c):
dcs.append('dc%d' % i)
if node_count < 1:
raise common.ArgumentError('invalid node count %s' % nodes)
for i in xrange(1, node_count + 1):
            if 'node%s' % i in self.nodes:
raise common.ArgumentError('Cannot create existing node node%s' % i)
if tokens is None and not use_vnodes:
if dcs is None or len(dcs) <= 1:
tokens = self.balanced_tokens(node_count)
else:
tokens = self.balanced_tokens_across_dcs(dcs)
if not ipformat:
ipformat = ipprefix+"%d"
for i in xrange(1, node_count + 1):
tk = None
if tokens is not None and i-1 < len(tokens):
tk = tokens[i-1]
dc = dcs[i-1] if i-1 < len(dcs) else None
binary = None
if self.cassandra_version() >= '1.2':
binary = (ipformat % i, 9042)
node = self.create_node('node%s' % i,
False,
(ipformat % i, 9160),
(ipformat % i, 7000),
str(7000 + i * 100),
(str(0), str(2000 + i * 100))[debug == True],
tk,
binary_interface=binary)
self.add(node, True, dc)
self._update_config()
return self
def create_node(self, name, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None):
return Node(name, self, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface)
def balanced_tokens(self, node_count):
if self.cassandra_version() >= '1.2' and not self.partitioner:
ptokens = [(i*(2**64//node_count)) for i in xrange(0, node_count)]
return [int(t - 2**63) for t in ptokens]
return [ int(i*(2**127//node_count)) for i in range(0, node_count) ]
def balanced_tokens_across_dcs(self, dcs):
tokens = []
current_dc = dcs[0]
count = 0
dc_count = 0
for dc in dcs:
if dc == current_dc:
count += 1
else:
new_tokens = [tk+(dc_count*100) for tk in self.balanced_tokens(count)]
tokens.extend(new_tokens)
current_dc = dc
count = 1
dc_count += 1
new_tokens = [tk+(dc_count*100) for tk in self.balanced_tokens(count)]
tokens.extend(new_tokens)
return tokens
def remove(self, node=None):
if node is not None:
if not node.name in self.nodes:
return
del self.nodes[node.name]
if node in self.seeds:
self.seeds.remove(node)
self._update_config()
node.stop(gently=False)
common.rmdirs(node.get_path())
else:
self.stop(gently=False)
common.rmdirs(self.get_path())
def clear(self):
self.stop()
for node in list(self.nodes.values()):
node.clear()
def get_path(self):
return os.path.join(self.__path, self.name)
def get_seeds(self):
return [ s.network_interfaces['storage'][0] for s in self.seeds ]
def show(self, verbose):
msg = "Cluster: '%s'" % self.name
print_(msg)
print_('-'*len(msg))
if len(list(self.nodes.values())) == 0:
print_("No node in this cluster yet")
return
for node in list(self.nodes.values()):
if (verbose):
node.show(show_cluster=False)
print_("")
else:
node.show(only_status=True)
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=False, wait_other_notice=False, jvm_args=[], profile_options=None):
if wait_other_notice:
marks = [(node, node.mark_log()) for node in list(self.nodes.values())]
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
p = node.start(update_pid=False, jvm_args=jvm_args, profile_options=profile_options)
started.append((node, p, mark))
if no_wait and not verbose:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
try:
start_message = "Listening for thrift clients..." if self.cassandra_version() < "2.2" else "Starting listening for CQL clients"
node.watch_log_for(start_message, timeout=60, process=p, verbose=verbose, from_mark=mark)
except RuntimeError:
return None
self.__update_pids(started)
for node, p, _ in started:
if not node.is_running():
raise NodeError("Error starting {0}.".format(node.name), p)
if not no_wait and self.cassandra_version() >= "0.8":
            # 0.7 gossip messages seem less predictable than from 0.8 onwards
            # and I don't care enough
for node, _, mark in started:
for other_node, _, _ in started:
if other_node is not node:
node.watch_log_for_alive(other_node, from_mark=mark)
if wait_other_notice:
for old_node, mark in marks:
for node, _, _ in started:
if old_node is not node:
old_node.watch_log_for_alive(node, from_mark=mark)
if wait_for_binary_proto and self.version() >= '1.2':
            for node, p, mark in started:
node.watch_log_for("Starting listening for CQL clients", process=p, verbose=verbose, from_mark=mark)
time.sleep(0.2)
return started
def stop(self, wait=True, gently=True):
not_running = []
for node in list(self.nodes.values()):
if not node.stop(wait, gently=gently):
not_running.append(node)
return not_running
def set_log_level(self, new_level, class_names=None):
known_level = [ 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR' ]
if new_level not in known_level:
raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))
if class_names:
for class_name in class_names:
if new_level == 'DEBUG':
if class_name in self._trace:
raise common.ArgumentError("Class %s already in TRACE" % (class_name))
self._debug.append(class_name)
if new_level == 'TRACE':
if class_name in self._debug:
raise common.ArgumentError("Class %s already in DEBUG" % (class_name))
self._trace.append(class_name)
else:
self.__log_level = new_level
self._update_config()
for node in self.nodelist():
for class_name in class_names:
node.set_log_level(new_level, class_name)
def nodetool(self, nodetool_cmd):
for node in list(self.nodes.values()):
if node.is_running():
node.nodetool(nodetool_cmd)
return self
def stress(self, stress_options):
stress = common.get_stress_bin(self.get_install_dir())
livenodes = [ node.network_interfaces['storage'][0] for node in list(self.nodes.values()) if node.is_live() ]
if len(livenodes) == 0:
print_("No live node")
return
if self.cassandra_version() <= '2.1':
args = [ stress, '-d', ",".join(livenodes) ] + stress_options
else:
args = [ stress ] + stress_options + ['-node', ','.join(livenodes) ]
try:
# need to set working directory for env on Windows
if common.is_win():
subprocess.call(args, cwd=common.parse_path(stress))
else:
subprocess.call(args)
except KeyboardInterrupt:
pass
return self
def run_cli(self, cmds=None, show_output=False, cli_options=[]):
livenodes = [ node for node in list(self.nodes.values()) if node.is_live() ]
if len(livenodes) == 0:
raise common.ArgumentError("No live node")
livenodes[0].run_cli(cmds, show_output, cli_options)
def set_configuration_options(self, values=None, batch_commitlog=None):
if values is not None:
for k, v in iteritems(values):
self._config_options[k] = v
if batch_commitlog is not None:
if batch_commitlog:
self._config_options["commitlog_sync"] = "batch"
self._config_options["commitlog_sync_batch_window_in_ms"] = 5
self._config_options["commitlog_sync_period_in_ms"] = None
else:
self._config_options["commitlog_sync"] = "periodic"
self._config_options["commitlog_sync_period_in_ms"] = 10000
self._config_options["commitlog_sync_batch_window_in_ms"] = None
self._update_config()
for node in list(self.nodes.values()):
node.import_config_files()
self.__update_topology_files()
return self
def set_dse_configuration_options(self, values=None):
raise common.ArgumentError('Cannot set DSE configuration options on a Cassandra cluster')
def flush(self):
self.nodetool("flush")
def compact(self):
self.nodetool("compact")
def drain(self):
self.nodetool("drain")
def repair(self):
self.nodetool("repair")
def cleanup(self):
self.nodetool("cleanup")
def decommission(self):
for node in list(self.nodes.values()):
if node.is_running():
node.decommission()
def removeToken(self, token):
self.nodetool("removeToken " + str(token))
def bulkload(self, options):
loader = BulkLoader(self)
loader.load(options)
def scrub(self, options):
for node in list(self.nodes.values()):
node.scrub(options)
def verify(self, options):
for node in list(self.nodes.values()):
node.verify(options)
def update_log4j(self, new_log4j_config):
# iterate over all nodes
for node in self.nodelist():
node.update_log4j(new_log4j_config)
def update_logback(self, new_logback_config):
# iterate over all nodes
for node in self.nodelist():
node.update_logback(new_logback_config)
def __get_version_from_build(self):
return common.get_version_from_build(self.get_install_dir())
def _update_config(self):
node_list = [ node.name for node in list(self.nodes.values()) ]
seed_list = [ node.name for node in self.seeds ]
filename = os.path.join(self.__path, self.name, 'cluster.conf')
with open(filename, 'w') as f:
yaml.safe_dump({
'name' : self.name,
'nodes' : node_list,
'seeds' : seed_list,
'partitioner' : self.partitioner,
'install_dir' : self.__install_dir,
'config_options' : self._config_options,
'dse_config_options' : self._dse_config_options,
'log_level' : self.__log_level,
'use_vnodes' : self.use_vnodes
}, f)
def __update_pids(self, started):
for node, p, _ in started:
node._update_pid(p)
def __update_topology_files(self):
dcs = [('default', 'dc1')]
for node in self.nodelist():
if node.data_center is not None:
dcs.append((node.address(), node.data_center))
content = ""
for k, v in dcs:
content = "%s%s=%s:r1\n" % (content, k, v)
for node in self.nodelist():
topology_file = os.path.join(node.get_conf_dir(), 'cassandra-topology.properties')
with open(topology_file, 'w') as f:
f.write(content)
def enable_ssl(self, ssl_path, require_client_auth):
shutil.copyfile(os.path.join(ssl_path, 'keystore.jks'), os.path.join(self.get_path(), 'keystore.jks'))
shutil.copyfile(os.path.join(ssl_path, 'cassandra.crt'), os.path.join(self.get_path(), 'cassandra.crt'))
ssl_options = {'enabled' : True,
'keystore' : os.path.join(self.get_path(), 'keystore.jks'),
'keystore_password' : 'cassandra'
}
# determine if truststore client encryption options should be enabled
truststore_file = os.path.join(ssl_path, 'truststore.jks')
if os.path.isfile(truststore_file):
shutil.copyfile(truststore_file, os.path.join(self.get_path(), 'truststore.jks'))
truststore_ssl_options = {'require_client_auth' : require_client_auth,
'truststore' : os.path.join(self.get_path(), 'truststore.jks'),
'truststore_password' : 'cassandra'
}
ssl_options.update(truststore_ssl_options)
self._config_options['client_encryption_options'] = ssl_options
self._update_config()
def enable_internode_ssl(self, node_ssl_path):
shutil.copyfile(os.path.join(node_ssl_path, 'keystore.jks'), os.path.join(self.get_path(), 'internode-keystore.jks'))
shutil.copyfile(os.path.join(node_ssl_path, 'truststore.jks'), os.path.join(self.get_path(), 'internode-truststore.jks'))
node_ssl_options = {
'internode_encryption': 'all',
'keystore': os.path.join(self.get_path(), 'internode-keystore.jks'),
'keystore_password': 'cassandra',
'truststore': os.path.join(self.get_path(), 'internode-truststore.jks'),
'truststore_password': 'cassandra'
}
self._config_options['server_encryption_options'] = node_ssl_options
self._update_config()
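# Usage sketch (illustrative; assumes ccm is installed, '/tmp/ccm' exists, and
# the requested Cassandra version can be fetched by the repository module):
#
#     from ccmlib.cluster import Cluster
#     cluster = Cluster('/tmp/ccm', 'test', version='2.1.9')
#     cluster.populate(3).start(wait_for_binary_proto=True)
#     cluster.nodetool('status')
#     cluster.stop()
#     cluster.remove()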
|
|
# Copyright 2011 Gilt Groupe, INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
mothership.zabbix
for interacting with the zabbix API
requires a third-party download of the zabbix API!
see INSTALL
"""
import sys
import mothership
# Useful handy functions brought in from mothership
from mothership.kv import collect as kv_collect
from mothership.kv import select as kv_select
# All of the models and sqlalchemy are brought in
# to simplify referencing
from mothership.mothership_models import *
def add(cfg, unqdn, zs_unqdn, zabbix_template):
"""
[description]
add a server to Zabbix
[parameter info]
required:
cfg: the config object. useful everywhere
unqdn: unqualified domain name of the server to add
zs_unqdn: unqdn of the zabbix server
zabbix_template: zabbix template to link the server to
[return value]
no explicit return
"""
# check to see if zabbix is enabled in the config
if cfg.zab_active == False:
print "Zabbix is not active, skipping. Zabbix can be enabled in the mothership config file."
return
else:
# Import Zabbix API
from zabbix_api2 import ZabbixAPI, ZabbixAPIException
# stitch together some info about our host
host,realm,site_id = mothership.split_fqdn(unqdn)
fqdn = '.'.join([unqdn,cfg.domain])
if zs_unqdn==None:
zs_unqdn, zs_user, zs_pass = get_default_server(cfg, realm, site_id)
else:
zs_user = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_user")).split('=')[1]
zs_pass = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_pass")).split('=')[1]
zs_host,zs_realm,zs_site_id = mothership.split_fqdn(zs_unqdn)
zs_fqdn = '.'.join([zs_unqdn,cfg.domain])
# set the zabbix host and url path, log in
try:
# uncomment to debug
#zapi = ZabbixAPI(zs_fqdn,'/', debug_level=6)
zapi = ZabbixAPI(zs_fqdn,'/')
zapi.login(zs_user, zs_pass)
# uncomment to debug
#print "Zabbix API Version: %s" % zapi.api_version()
#print "Logged in: %s" % str(zapi.test_login())
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
# get server info
s, n, h = mothership.snh(cfg, host, realm, site_id)
# Construct the tag template name.
# All templates descend from the default
discard,zab_def_tmpl = str(kv_select(cfg, '', key="zabbix_default_template")).split('=')
# uncomment to debug
#print 'Default template is: ' + zab_def_tmpl
zab_tag_tmpl = zab_def_tmpl + '_' + s.tag
# get the templateid from zabbix
zab_tid = None
t = zapi.template.get(host=zab_tag_tmpl)
if t:
for k in t.keys():
zab_tid = t[k]['templateid']
else:
pass
# if we're given a template, try and use that
if zabbix_template:
t = zapi.template.get(host=zabbix_template)
if t:
for k in t.keys():
zab_tid = t[k]['templateid']
print "Found template for: " + zabbix_template + ", templateid: " + zab_tid
# if template we were given doesn't exist, try the tag template
elif zab_tid:
print "No template found for: " + zabbix_template + ", trying tag template for \"" + zab_tag_tmpl + "\""
print "Found tag template for: " + zab_tag_tmpl + ", templateid: " + zab_tid
else:
print "No template found for: " + zabbix_template + ", trying tag template for \"" + zab_tag_tmpl + "\""
print "No tag template found for: " + zab_tag_tmpl + ", creating one and linking to the default: "+zab_def_tmpl
# we'll need the hostgroup ID for the "Templates" group
tgid = None
hg = zapi.hostgroup.get(filter={'name':'Templates'})
if hg:
for k in hg:
tgid = k['groupid']
else:
print "Templates group not found! Fix Zabbix"
if tgid:
print "Templates group id: "+tgid
else:
print "Templates goup id is empty, something went wrong"
# Get the template ID for the default template
discard,zab_def_tmpl = str(kv_select(cfg, '', key="zabbix_default_template")).split('=')
t = zapi.template.get(host=zab_def_tmpl)
if t:
for k in t.keys():
zab_tid = t[k]['templateid']
print "Found default template: " + zab_def_tmpl + ", templateid: " + zab_tid
else:
print "No default template! Check mothership's KV"
return
# Create the tag template, assign it to the default Templates group, link to the default template
t = zapi.template.create(host=zab_tag_tmpl, groups={'groupid':tgid}, templates={'templateid':zab_tid})
print "Created template: "+zab_tag_tmpl+" with template ID: "+t['templateids'][0]
zab_tid = t['templateids'][0]
# if no template is given and if one exists, use the tag template
# this is constructed automatically from the tag name
elif zab_tid:
print "No template supplied, trying tag template for \"" + zab_tag_tmpl + "\""
print "Found tag template for: " + zab_tag_tmpl + ", templateid: " + zab_tid
# if all else fails, create a tag template linked to the default template
else:
print "No tag template found for: " + zab_tag_tmpl + ", creating one and linking to the default: "+zab_def_tmpl
# we'll need the hostgroup ID for the "Templates" group
tgid = None
hg = zapi.hostgroup.get(filter={'name':'Templates'})
if hg:
tgid = hg[0]['groupid']
else:
print "Templates group not found! Fix Zabbix"
if tgid:
print "Templates group id: "+tgid
else:
print "Templates goup id is empty, something went wrong"
# Get the template ID for the default template
discard,zab_def_tmpl = str(kv_select(cfg, '', key="zabbix_default_template")).split('=')
t = zapi.template.get(host=zab_def_tmpl)
if t:
for k in t.keys():
zab_tid = t[k]['templateid']
print "Found default template: " + zab_def_tmpl + ", templateid: " + zab_tid
else:
print "No default template! Check mothership's KV"
return
# Create the tag template, assign it to the default Templates group, link to the default template
t = zapi.template.create(host=zab_tag_tmpl, groups={'groupid':tgid}, templates={'templateid':zab_tid})
print "Created template: "+zab_tag_tmpl+" with template ID: "+t['templateids'][0]
zab_tid = t['templateids'][0]
# Check to see if the tag has a group. if not, create it
zab_tag_group = []
a_tags = kv_collect(cfg, unqdn, key='tag')
t = zapi.hostgroup.get(filter={'name':s.tag})
if t:
print "Found group "+s.tag+" adding "+unqdn+" to it"
zab_tag_group.append(t)
else:
print "No group found for "+s.tag+", creating it and adding "+unqdn+" to it"
hgid = zapi.hostgroup.create(name=s.tag)['groupids'][0]
zab_tag_group.append(hgid)
# check to see if the ancillary tags have groups. if not, create them
for tag_kv in a_tags:
discard,r = str(tag_kv).split('=')
t = zapi.hostgroup.get(filter={'name':r})
if t:
print "Found group "+r+" adding "+unqdn+" to it"
zab_tag_group.append(t)
else:
print "No group found for "+r+", creating it and adding "+unqdn+" to it"
hgid = zapi.hostgroup.create(name=r)['groupids'][0]
zab_tag_group.append(hgid)
# Insert the host into zabbix
try:
zapi.host.create(host=unqdn, dns=fqdn, groups=zab_tag_group, templates=[zab_tid], port='10050')
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
def remove(cfg, unqdn, zs_unqdn):
"""
[description]
Delete a server *completely* from zabbix.
removes all graphs and data items from the DB
[parameter info]
required:
cfg: the config object. useful everywhere
unqdn: unqualified domain name of the server to delete
zs_unqdn: unqdn of the zabbix server
[return value]
no explicit return
"""
# check to see if zabbix is enabled in the config
if cfg.zab_active == False:
print "Zabbix is not active, skipping. Zabbix can be enabled in the mothership config file."
return
else:
pass
# stitch together some info about our host
host,realm,site_id = mothership.split_fqdn(unqdn)
if zs_unqdn==None:
zs_unqdn, zs_user, zs_pass = get_default_server(cfg, realm, site_id)
else:
zs_user = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_user")).split('=')[1]
zs_pass = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_pass")).split('=')[1]
zs_host,zs_realm,zs_site_id = mothership.split_fqdn(zs_unqdn)
zs_fqdn = '.'.join([zs_unqdn,cfg.domain])
# set the zabbix host and url path, log in
try:
# uncomment to debug
#zapi = ZabbixAPI(zs_fqdn,'/', debug_level=6)
zapi = ZabbixAPI(zs_fqdn,'/')
zapi.login(zs_user, zs_pass)
# uncomment to debug
#print "Zabbix API Version: %s" % zapi.api_version()
#print "Logged in: %s" % str(zapi.test_login())
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
try:
t = zapi.host.get(filter={'host':unqdn})
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
hid = None
if t:
hid = t['hostid']
print unqdn+" found, host id is: " + hid
else:
print "Host not found: " + unqdn
if hid:
print '\n********************************************************************'
print '* ACHTUNG! This will remove all data associated with this machine! *'
print '* To stop taking data but keep all graphs, use "--disable" *'
print '********************************************************************\n'
ans = raw_input('To continue deleting, please type "delete_%s": ' % host)
if ans != 'delete_%s' % host:
print 'Remove server aborted'
else:
zapi.host.delete([hid])
print "Completely removing host "+host+" with ID: "+hid
else:
print "Host ID is empty, aborting"
def enable(cfg, unqdn, zs_unqdn):
"""
[description]
enable a server within zabbix
[parameter info]
required:
cfg: the config object. useful everywhere
unqdn: unqualified domain name of the server to enable
zs_unqdn: unqdn of the zabbix server
[return value]
no explicit return
"""
# check to see if zabbix is enabled in the config
if cfg.zab_active == False:
print "Zabbix is not active, skipping. Zabbix can be enabled in the mothership config file."
return
else:
pass
# stitch together some info about our host
host,realm,site_id = mothership.split_fqdn(unqdn)
if zs_unqdn==None:
zs_unqdn, zs_user, zs_pass = get_default_server(cfg, realm, site_id)
else:
zs_user = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_user")).split('=')[1]
zs_pass = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_pass")).split('=')[1]
zs_host,zs_realm,zs_site_id = mothership.split_fqdn(zs_unqdn)
zs_fqdn = '.'.join([zs_unqdn,cfg.domain])
# set the zabbix host and url path, log in
try:
# uncomment to debug
#zapi = ZabbixAPI(zs_fqdn,'/', debug_level=6)
zapi = ZabbixAPI(zs_fqdn,'/')
zapi.login(zs_user, zs_pass)
# uncomment to debug
#print "Zabbix API Version: %s" % zapi.api_version()
#print "Logged in: %s" % str(zapi.test_login())
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
# get host info
t = zapi.host.get(filter={'host':unqdn}, output='extend')
hid = None
if t:
hid = t['hostid']
hstatus = t['status']
else:
print "Host not found: " + unqdn
# enable only if it's disabled, let the user know
if hstatus == '0':
print "Host is already enabled"
return
else:
print unqdn+" is disabled, enabling"
zapi.host.update(hostid=hid, status='0')
def disable(cfg, unqdn, zs_unqdn):
"""
[description]
disable a server within zabbix
[parameter info]
required:
cfg: the config object. useful everywhere
unqdn: unqualified domain name of the server to disable
zs_unqdn: unqdn of the zabbix server
[return value]
no explicit return
"""
# just making sure something is assigned to it...
hstatus = None
# check to see if zabbix is enabled in the config
if cfg.zab_active == False:
print "Zabbix is not active, skipping. Zabbix can be enabled in the mothership config file."
return
else:
pass
# stitch together some info about our host
host,realm,site_id = mothership.split_fqdn(unqdn)
if zs_unqdn==None:
zs_unqdn, zs_user, zs_pass = get_default_server(cfg, realm, site_id)
else:
zs_user = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_user")).split('=')[1]
zs_pass = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_pass")).split('=')[1]
zs_host,zs_realm,zs_site_id = mothership.split_fqdn(zs_unqdn)
zs_fqdn = '.'.join([zs_unqdn,cfg.domain])
# set the zabbix host and url path, log in
try:
# uncomment to debug
#zapi = ZabbixAPI(zs_fqdn,'/', debug_level=6)
zapi = ZabbixAPI(zs_fqdn,'/')
zapi.login(zs_user, zs_pass)
# uncomment to debug
#print "Zabbix API Version: %s" % zapi.api_version()
#print "Logged in: %s" % str(zapi.test_login())
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
# get host info
t = zapi.host.get(filter={'host':unqdn}, output='extend')
hid = None
if t:
hid = t['hostid']
hstatus = t['status']
else:
print "Host not found: " + unqdn
# disable only if it's enabled, let the user know
if hstatus != '0':
print unqdn+" is already disabled"
return
else:
print "Disabling %s in zabbix. to completely delete all data use \"ship zbx -r\"" % unqdn
zapi.host.update(hostid=hid, status='1')
# Display info about a host in zabbix
def display(cfg, unqdn, zs_unqdn):
"""
[description]
display a server's zabbix info
[parameter info]
required:
cfg: the config object. useful everywhere
unqdn: unqualified domain name of the server to display
zs_unqdn: unqdn of the zabbix server
[return value]
no explicit return
"""
# check to see if zabbix is enabled in the config
if cfg.zab_active == False:
print "Zabbix is not active, skipping. Zabbix can be enabled in the mothership config file."
return
else:
pass
# stitch together some info about our host
host,realm,site_id = mothership.split_fqdn(unqdn)
if zs_unqdn==None:
zs_unqdn, zs_user, zs_pass = get_default_server(cfg, realm, site_id)
else:
zs_user = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_user")).split('=')[1]
zs_pass = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_pass")).split('=')[1]
zs_host,zs_realm,zs_site_id = mothership.split_fqdn(zs_unqdn)
zs_fqdn = '.'.join([zs_unqdn,cfg.domain])
# set the zabbix host and url path, log in
try:
# uncomment to debug
#zapi = ZabbixAPI(zs_fqdn,'/', debug_level=6)
zapi = ZabbixAPI(zs_fqdn,'/')
zapi.login(zs_user, zs_pass)
# uncomment to debug
#print "Zabbix API Version: %s" % zapi.api_version()
#print "Logged in: %s" % str(zapi.test_login())
except ZabbixAPIException, e:
sys.stderr.write(str(e) + '\n')
# get server,network,hardware info
s, n, h = mothership.snh(cfg, host, realm, site_id)
# get group id for group "Templates"
tgid = None
hg = zapi.hostgroup.get(filter={'name':'Templates'})
if hg:
        tgid = hg[0]['groupid']
else:
print "Templates group not found! Fix Zabbix"
if tgid:
print "Templates group id: "+tgid
else:
print "Templates goup id is empty, something went wrong"
# get default template info
discard,zab_def_tmpl = str(kv_select(cfg, '', key="zabbix_default_template")).split('=')
t = zapi.template.get(host=zab_def_tmpl)
if t:
for k in t.keys():
zab_tid = t[k]['templateid']
print "Found default template: " + zab_def_tmpl + ", templateid: " + zab_tid
else:
print "No default template! Check mothership's KV"
# get tag template id if exists
    tname = zab_def_tmpl + '_' + s.tag
t = zapi.template.get(host=tname)
if t:
for k in t:
tid = t[k]['templateid']
print "Template "+tname+" found, ID: "+tid
else:
print "Template not found: "+tname
# get host status
t = zapi.host.get(filter={'host':unqdn}, output='extend')
hid = None
if t:
hid = t['hostid']
hstatus = t['status']
if hstatus == '0':
print unqdn+" id: "+hid+" status: "+hstatus+" (enabled)"
else:
print unqdn+" id: "+hid+" status: "+hstatus+" (disabled)"
else:
print "Host not found: " + unqdn
def get_default_server(cfg, realm, site_id):
"""
[description]
retrieves and returns the default server, user, and pass for zabbix in whatever the realm.site_id is its given
[parameter info]
required:
cfg: the config object. useful everywhere
realm: realm to return server data for
site_id: site_id to return server data for
[return value]
returns zs_unqdn, zs_user, zs_pass as a list
"""
serv = cfg.dbsess.query(Server).\
filter(Server.tag=="zabbix_server").\
filter(Server.realm==realm).\
filter(Server.site_id==site_id).\
first()
zs_unqdn = '.'.join([serv.hostname,realm,site_id])
discard,zs_user = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_user")).split('=')
discard,zs_pass = str(kv_select(cfg, zs_unqdn, key="zabbix_admin_pass")).split('=')
retval = [zs_unqdn, zs_user, zs_pass]
return retval
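# Usage sketch (illustrative; 'cfg' must be a loaded mothership config object
# and the hostname below is made up):
#
#     import mothership.zabbix as zbx
#     zbx.add(cfg, 'web1.prod.iad', None, None)  # use tag/default template
#     zbx.disable(cfg, 'web1.prod.iad', None)    # stop monitoring, keep data
#     zbx.remove(cfg, 'web1.prod.iad', None)     # interactive; deletes all data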
|
|
"""Alexa message handlers."""
import logging
import math
from homeassistant import core as ha
from homeassistant.components import (
camera,
cover,
fan,
group,
input_number,
light,
media_player,
timer,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ENTITY_PICTURE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_LOCK,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_UNLOCK,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_ALARM_DISARMED,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import network
import homeassistant.util.color as color_util
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import convert as convert_temperature
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_MODES_CUSTOM,
API_THERMOSTAT_PRESETS,
PERCENTAGE_FAN_MAP,
Cause,
Inputs,
)
from .entities import async_get_entities
from .errors import (
AlexaInvalidDirectiveError,
AlexaInvalidValueError,
AlexaSecurityPanelAuthorizationRequired,
AlexaSecurityPanelUnauthorizedError,
AlexaTempRangeError,
AlexaUnsupportedThermostatModeError,
AlexaVideoActionNotPermittedForContentError,
)
from .state_report import async_enable_proactive_mode
_LOGGER = logging.getLogger(__name__)
HANDLERS = Registry()
@HANDLERS.register(("Alexa.Discovery", "Discover"))
async def async_api_discovery(hass, config, directive, context):
"""Create a API formatted discovery response.
Async friendly.
"""
discovery_endpoints = [
alexa_entity.serialize_discovery()
for alexa_entity in async_get_entities(hass, config)
if config.should_expose(alexa_entity.entity_id)
]
return directive.response(
name="Discover.Response",
namespace="Alexa.Discovery",
payload={"endpoints": discovery_endpoints},
)
@HANDLERS.register(("Alexa.Authorization", "AcceptGrant"))
async def async_api_accept_grant(hass, config, directive, context):
"""Create a API formatted AcceptGrant response.
Async friendly.
"""
auth_code = directive.payload["grant"]["code"]
_LOGGER.debug("AcceptGrant code: %s", auth_code)
if config.supports_auth:
await config.async_accept_grant(auth_code)
if config.should_report_state:
await async_enable_proactive_mode(hass, config)
return directive.response(
name="AcceptGrant.Response", namespace="Alexa.Authorization", payload={}
)
@HANDLERS.register(("Alexa.PowerController", "TurnOn"))
async def async_api_turn_on(hass, config, directive, context):
"""Process a turn on request."""
entity = directive.entity
domain = entity.domain
if domain == group.DOMAIN:
domain = ha.DOMAIN
service = SERVICE_TURN_ON
if domain == cover.DOMAIN:
service = cover.SERVICE_OPEN_COVER
elif domain == vacuum.DOMAIN:
supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if not supported & vacuum.SUPPORT_TURN_ON and supported & vacuum.SUPPORT_START:
service = vacuum.SERVICE_START
elif domain == timer.DOMAIN:
service = timer.SERVICE_START
elif domain == media_player.DOMAIN:
supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
power_features = media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF
if not supported & power_features:
service = media_player.SERVICE_MEDIA_PLAY
await hass.services.async_call(
domain,
service,
{ATTR_ENTITY_ID: entity.entity_id},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.PowerController", "TurnOff"))
async def async_api_turn_off(hass, config, directive, context):
"""Process a turn off request."""
entity = directive.entity
domain = entity.domain
if entity.domain == group.DOMAIN:
domain = ha.DOMAIN
service = SERVICE_TURN_OFF
if entity.domain == cover.DOMAIN:
service = cover.SERVICE_CLOSE_COVER
elif domain == vacuum.DOMAIN:
supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if (
not supported & vacuum.SUPPORT_TURN_OFF
and supported & vacuum.SUPPORT_RETURN_HOME
):
service = vacuum.SERVICE_RETURN_TO_BASE
elif domain == timer.DOMAIN:
service = timer.SERVICE_CANCEL
elif domain == media_player.DOMAIN:
supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
power_features = media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF
if not supported & power_features:
service = media_player.SERVICE_MEDIA_STOP
await hass.services.async_call(
domain,
service,
{ATTR_ENTITY_ID: entity.entity_id},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.BrightnessController", "SetBrightness"))
async def async_api_set_brightness(hass, config, directive, context):
"""Process a set brightness request."""
entity = directive.entity
brightness = int(directive.payload["brightness"])
await hass.services.async_call(
entity.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id, light.ATTR_BRIGHTNESS_PCT: brightness},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.BrightnessController", "AdjustBrightness"))
async def async_api_adjust_brightness(hass, config, directive, context):
"""Process an adjust brightness request."""
entity = directive.entity
brightness_delta = int(directive.payload["brightnessDelta"])
# read current state
try:
current = math.floor(
int(entity.attributes.get(light.ATTR_BRIGHTNESS)) / 255 * 100
)
except ZeroDivisionError:
current = 0
# set brightness
brightness = max(0, brightness_delta + current)
await hass.services.async_call(
entity.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id, light.ATTR_BRIGHTNESS_PCT: brightness},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.ColorController", "SetColor"))
async def async_api_set_color(hass, config, directive, context):
"""Process a set color request."""
entity = directive.entity
rgb = color_util.color_hsb_to_RGB(
float(directive.payload["color"]["hue"]),
float(directive.payload["color"]["saturation"]),
float(directive.payload["color"]["brightness"]),
)
await hass.services.async_call(
entity.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id, light.ATTR_RGB_COLOR: rgb},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "SetColorTemperature"))
async def async_api_set_color_temperature(hass, config, directive, context):
"""Process a set color temperature request."""
entity = directive.entity
kelvin = int(directive.payload["colorTemperatureInKelvin"])
await hass.services.async_call(
entity.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id, light.ATTR_KELVIN: kelvin},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "DecreaseColorTemperature"))
async def async_api_decrease_color_temp(hass, config, directive, context):
"""Process a decrease color temperature request."""
entity = directive.entity
current = int(entity.attributes.get(light.ATTR_COLOR_TEMP))
max_mireds = int(entity.attributes.get(light.ATTR_MAX_MIREDS))
value = min(max_mireds, current + 50)
await hass.services.async_call(
entity.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id, light.ATTR_COLOR_TEMP: value},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.ColorTemperatureController", "IncreaseColorTemperature"))
async def async_api_increase_color_temp(hass, config, directive, context):
"""Process an increase color temperature request."""
entity = directive.entity
current = int(entity.attributes.get(light.ATTR_COLOR_TEMP))
min_mireds = int(entity.attributes.get(light.ATTR_MIN_MIREDS))
value = max(min_mireds, current - 50)
await hass.services.async_call(
entity.domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id, light.ATTR_COLOR_TEMP: value},
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.SceneController", "Activate"))
async def async_api_activate(hass, config, directive, context):
"""Process an activate request."""
entity = directive.entity
domain = entity.domain
await hass.services.async_call(
domain,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity.entity_id},
blocking=False,
context=context,
)
payload = {
"cause": {"type": Cause.VOICE_INTERACTION},
"timestamp": f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z",
}
return directive.response(
name="ActivationStarted", namespace="Alexa.SceneController", payload=payload
)
@HANDLERS.register(("Alexa.SceneController", "Deactivate"))
async def async_api_deactivate(hass, config, directive, context):
"""Process a deactivate request."""
entity = directive.entity
domain = entity.domain
await hass.services.async_call(
domain,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: entity.entity_id},
blocking=False,
context=context,
)
payload = {
"cause": {"type": Cause.VOICE_INTERACTION},
"timestamp": f"{dt_util.utcnow().replace(tzinfo=None).isoformat()}Z",
}
return directive.response(
name="DeactivationStarted", namespace="Alexa.SceneController", payload=payload
)
@HANDLERS.register(("Alexa.PercentageController", "SetPercentage"))
async def async_api_set_percentage(hass, config, directive, context):
"""Process a set percentage request."""
entity = directive.entity
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.domain == fan.DOMAIN:
service = fan.SERVICE_SET_SPEED
speed = "off"
percentage = int(directive.payload["percentage"])
if percentage <= 33:
speed = "low"
elif percentage <= 66:
speed = "medium"
elif percentage <= 100:
speed = "high"
data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
return directive.response()
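# Worked example of the percentage-to-speed mapping above (illustrative):
# an Alexa payload of {"percentage": 25} maps to fan speed "low", 50 maps to
# "medium", and 90 maps to "high"; anything above 100 falls through all three
# branches and leaves the initial "off".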
@HANDLERS.register(("Alexa.PercentageController", "AdjustPercentage"))
async def async_api_adjust_percentage(hass, config, directive, context):
"""Process an adjust percentage request."""
entity = directive.entity
percentage_delta = int(directive.payload["percentageDelta"])
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.domain == fan.DOMAIN:
service = fan.SERVICE_SET_SPEED
speed = entity.attributes.get(fan.ATTR_SPEED)
current = PERCENTAGE_FAN_MAP.get(speed, 100)
# set percentage
percentage = max(0, percentage_delta + current)
speed = "off"
if percentage <= 33:
speed = "low"
elif percentage <= 66:
speed = "medium"
elif percentage <= 100:
speed = "high"
data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.LockController", "Lock"))
async def async_api_lock(hass, config, directive, context):
"""Process a lock request."""
entity = directive.entity
await hass.services.async_call(
entity.domain,
SERVICE_LOCK,
{ATTR_ENTITY_ID: entity.entity_id},
blocking=False,
context=context,
)
response = directive.response()
response.add_context_property(
{"name": "lockState", "namespace": "Alexa.LockController", "value": "LOCKED"}
)
return response
@HANDLERS.register(("Alexa.LockController", "Unlock"))
async def async_api_unlock(hass, config, directive, context):
"""Process an unlock request."""
if config.locale not in {"de-DE", "en-US", "ja-JP"}:
msg = f"The unlock directive is not supported for the following locales: {config.locale}"
raise AlexaInvalidDirectiveError(msg)
entity = directive.entity
await hass.services.async_call(
entity.domain,
SERVICE_UNLOCK,
{ATTR_ENTITY_ID: entity.entity_id},
blocking=False,
context=context,
)
response = directive.response()
response.add_context_property(
{"namespace": "Alexa.LockController", "name": "lockState", "value": "UNLOCKED"}
)
return response
@HANDLERS.register(("Alexa.Speaker", "SetVolume"))
async def async_api_set_volume(hass, config, directive, context):
"""Process a set volume request."""
volume = round(float(directive.payload["volume"] / 100), 2)
entity = directive.entity
data = {
ATTR_ENTITY_ID: entity.entity_id,
media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,
}
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.InputController", "SelectInput"))
async def async_api_select_input(hass, config, directive, context):
"""Process a set input request."""
media_input = directive.payload["input"]
entity = directive.entity
# Attempt to map the ALL UPPERCASE payload name to a source.
# Strips trailing 1 to match single input devices.
source_list = entity.attributes.get(media_player.const.ATTR_INPUT_SOURCE_LIST, [])
for source in source_list:
formatted_source = (
source.lower().replace("-", "").replace("_", "").replace(" ", "")
)
media_input = media_input.lower().replace(" ", "")
if (
formatted_source in Inputs.VALID_SOURCE_NAME_MAP.keys()
and formatted_source == media_input
) or (
media_input.endswith("1") and formatted_source == media_input.rstrip("1")
):
media_input = source
break
else:
msg = (
f"failed to map input {media_input} to a media source on {entity.entity_id}"
)
raise AlexaInvalidValueError(msg)
data = {
ATTR_ENTITY_ID: entity.entity_id,
media_player.const.ATTR_INPUT_SOURCE: media_input,
}
await hass.services.async_call(
entity.domain,
media_player.SERVICE_SELECT_SOURCE,
data,
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.Speaker", "AdjustVolume"))
async def async_api_adjust_volume(hass, config, directive, context):
"""Process an adjust volume request."""
volume_delta = int(directive.payload["volume"])
entity = directive.entity
current_level = entity.attributes.get(media_player.const.ATTR_MEDIA_VOLUME_LEVEL)
# read current state
try:
current = math.floor(int(current_level * 100))
except ZeroDivisionError:
current = 0
volume = float(max(0, volume_delta + current) / 100)
data = {
ATTR_ENTITY_ID: entity.entity_id,
media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,
}
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.StepSpeaker", "AdjustVolume"))
async def async_api_adjust_volume_step(hass, config, directive, context):
"""Process an adjust volume step request."""
    # The media_player volume up/down services do not support specifying steps;
    # each component handles step size differently, e.g. via config.
    # This workaround simply calls volume up/down the requested number of steps.
    # When no steps are specified in the request, Alexa sends a default of 10,
    # which is too high for most purposes, so the default is set to 1 here.
entity = directive.entity
volume_int = int(directive.payload["volumeSteps"])
is_default = bool(directive.payload["volumeStepsDefault"])
default_steps = 1
if volume_int < 0:
service_volume = SERVICE_VOLUME_DOWN
if is_default:
volume_int = -default_steps
else:
service_volume = SERVICE_VOLUME_UP
if is_default:
volume_int = default_steps
data = {ATTR_ENTITY_ID: entity.entity_id}
for _ in range(abs(volume_int)):
await hass.services.async_call(
entity.domain, service_volume, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.StepSpeaker", "SetMute"))
@HANDLERS.register(("Alexa.Speaker", "SetMute"))
async def async_api_set_mute(hass, config, directive, context):
"""Process a set mute request."""
mute = bool(directive.payload["mute"])
entity = directive.entity
data = {
ATTR_ENTITY_ID: entity.entity_id,
media_player.const.ATTR_MEDIA_VOLUME_MUTED: mute,
}
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_MUTE, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Play"))
async def async_api_play(hass, config, directive, context):
"""Process a play request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
await hass.services.async_call(
entity.domain, SERVICE_MEDIA_PLAY, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Pause"))
async def async_api_pause(hass, config, directive, context):
"""Process a pause request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
await hass.services.async_call(
entity.domain, SERVICE_MEDIA_PAUSE, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Stop"))
async def async_api_stop(hass, config, directive, context):
"""Process a stop request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
await hass.services.async_call(
entity.domain, SERVICE_MEDIA_STOP, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Next"))
async def async_api_next(hass, config, directive, context):
"""Process a next request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
await hass.services.async_call(
entity.domain, SERVICE_MEDIA_NEXT_TRACK, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.PlaybackController", "Previous"))
async def async_api_previous(hass, config, directive, context):
"""Process a previous request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
await hass.services.async_call(
entity.domain,
SERVICE_MEDIA_PREVIOUS_TRACK,
data,
blocking=False,
context=context,
)
return directive.response()
def temperature_from_object(hass, temp_obj, interval=False):
"""Get temperature from Temperature object in requested unit."""
to_unit = hass.config.units.temperature_unit
from_unit = TEMP_CELSIUS
temp = float(temp_obj["value"])
if temp_obj["scale"] == "FAHRENHEIT":
from_unit = TEMP_FAHRENHEIT
elif temp_obj["scale"] == "KELVIN":
# convert to Celsius if absolute temperature
if not interval:
temp -= 273.15
return convert_temperature(temp, from_unit, to_unit, interval)
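# Illustrative usage sketch (not part of the original handlers; the payloads and
# numbers below are made up): temperature_from_object expects the Alexa Temperature
# object shape, e.g.
#
#     temperature_from_object(hass, {"value": 72.0, "scale": "FAHRENHEIT"})
#     temperature_from_object(hass, {"value": 2.0, "scale": "CELSIUS"}, interval=True)
#
# With interval=True only the magnitude is converted (no offset applied), which is
# why the KELVIN branch above skips the 273.15 shift for intervals.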
@HANDLERS.register(("Alexa.ThermostatController", "SetTargetTemperature"))
async def async_api_set_target_temp(hass, config, directive, context):
"""Process a set target temperature request."""
entity = directive.entity
min_temp = entity.attributes.get(climate.ATTR_MIN_TEMP)
max_temp = entity.attributes.get(climate.ATTR_MAX_TEMP)
unit = hass.config.units.temperature_unit
data = {ATTR_ENTITY_ID: entity.entity_id}
payload = directive.payload
response = directive.response()
if "targetSetpoint" in payload:
temp = temperature_from_object(hass, payload["targetSetpoint"])
if temp < min_temp or temp > max_temp:
raise AlexaTempRangeError(hass, temp, min_temp, max_temp)
data[ATTR_TEMPERATURE] = temp
response.add_context_property(
{
"name": "targetSetpoint",
"namespace": "Alexa.ThermostatController",
"value": {"value": temp, "scale": API_TEMP_UNITS[unit]},
}
)
if "lowerSetpoint" in payload:
temp_low = temperature_from_object(hass, payload["lowerSetpoint"])
if temp_low < min_temp or temp_low > max_temp:
raise AlexaTempRangeError(hass, temp_low, min_temp, max_temp)
data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
response.add_context_property(
{
"name": "lowerSetpoint",
"namespace": "Alexa.ThermostatController",
"value": {"value": temp_low, "scale": API_TEMP_UNITS[unit]},
}
)
if "upperSetpoint" in payload:
temp_high = temperature_from_object(hass, payload["upperSetpoint"])
if temp_high < min_temp or temp_high > max_temp:
raise AlexaTempRangeError(hass, temp_high, min_temp, max_temp)
data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
response.add_context_property(
{
"name": "upperSetpoint",
"namespace": "Alexa.ThermostatController",
"value": {"value": temp_high, "scale": API_TEMP_UNITS[unit]},
}
)
await hass.services.async_call(
entity.domain,
climate.SERVICE_SET_TEMPERATURE,
data,
blocking=False,
context=context,
)
return response
@HANDLERS.register(("Alexa.ThermostatController", "AdjustTargetTemperature"))
async def async_api_adjust_target_temp(hass, config, directive, context):
"""Process an adjust target temperature request."""
entity = directive.entity
min_temp = entity.attributes.get(climate.ATTR_MIN_TEMP)
max_temp = entity.attributes.get(climate.ATTR_MAX_TEMP)
unit = hass.config.units.temperature_unit
temp_delta = temperature_from_object(
hass, directive.payload["targetSetpointDelta"], interval=True
)
target_temp = float(entity.attributes.get(ATTR_TEMPERATURE)) + temp_delta
if target_temp < min_temp or target_temp > max_temp:
raise AlexaTempRangeError(hass, target_temp, min_temp, max_temp)
data = {ATTR_ENTITY_ID: entity.entity_id, ATTR_TEMPERATURE: target_temp}
response = directive.response()
await hass.services.async_call(
entity.domain,
climate.SERVICE_SET_TEMPERATURE,
data,
blocking=False,
context=context,
)
response.add_context_property(
{
"name": "targetSetpoint",
"namespace": "Alexa.ThermostatController",
"value": {"value": target_temp, "scale": API_TEMP_UNITS[unit]},
}
)
return response
@HANDLERS.register(("Alexa.ThermostatController", "SetThermostatMode"))
async def async_api_set_thermostat_mode(hass, config, directive, context):
"""Process a set thermostat mode request."""
entity = directive.entity
mode = directive.payload["thermostatMode"]
mode = mode if isinstance(mode, str) else mode["value"]
data = {ATTR_ENTITY_ID: entity.entity_id}
ha_preset = next((k for k, v in API_THERMOSTAT_PRESETS.items() if v == mode), None)
if ha_preset:
presets = entity.attributes.get(climate.ATTR_PRESET_MODES, [])
if ha_preset not in presets:
msg = f"The requested thermostat mode {ha_preset} is not supported"
raise AlexaUnsupportedThermostatModeError(msg)
service = climate.SERVICE_SET_PRESET_MODE
data[climate.ATTR_PRESET_MODE] = ha_preset
elif mode == "CUSTOM":
operation_list = entity.attributes.get(climate.ATTR_HVAC_MODES)
custom_mode = directive.payload["thermostatMode"]["customName"]
custom_mode = next(
(k for k, v in API_THERMOSTAT_MODES_CUSTOM.items() if v == custom_mode),
None,
)
if custom_mode not in operation_list:
msg = (
f"The requested thermostat mode {mode}: {custom_mode} is not supported"
)
raise AlexaUnsupportedThermostatModeError(msg)
service = climate.SERVICE_SET_HVAC_MODE
data[climate.ATTR_HVAC_MODE] = custom_mode
else:
operation_list = entity.attributes.get(climate.ATTR_HVAC_MODES)
ha_modes = {k: v for k, v in API_THERMOSTAT_MODES.items() if v == mode}
ha_mode = next(iter(set(ha_modes).intersection(operation_list)), None)
if ha_mode not in operation_list:
msg = f"The requested thermostat mode {mode} is not supported"
raise AlexaUnsupportedThermostatModeError(msg)
service = climate.SERVICE_SET_HVAC_MODE
data[climate.ATTR_HVAC_MODE] = ha_mode
response = directive.response()
await hass.services.async_call(
climate.DOMAIN, service, data, blocking=False, context=context
)
response.add_context_property(
{
"name": "thermostatMode",
"namespace": "Alexa.ThermostatController",
"value": mode,
}
)
return response
@HANDLERS.register(("Alexa", "ReportState"))
async def async_api_reportstate(hass, config, directive, context):
"""Process a ReportState request."""
return directive.response(name="StateReport")
@HANDLERS.register(("Alexa.PowerLevelController", "SetPowerLevel"))
async def async_api_set_power_level(hass, config, directive, context):
"""Process a SetPowerLevel request."""
entity = directive.entity
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.domain == fan.DOMAIN:
service = fan.SERVICE_SET_SPEED
speed = "off"
percentage = int(directive.payload["powerLevel"])
if percentage <= 33:
speed = "low"
elif percentage <= 66:
speed = "medium"
else:
speed = "high"
data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.PowerLevelController", "AdjustPowerLevel"))
async def async_api_adjust_power_level(hass, config, directive, context):
"""Process an AdjustPowerLevel request."""
entity = directive.entity
percentage_delta = int(directive.payload["powerLevelDelta"])
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.domain == fan.DOMAIN:
service = fan.SERVICE_SET_SPEED
speed = entity.attributes.get(fan.ATTR_SPEED)
current = PERCENTAGE_FAN_MAP.get(speed, 100)
# set percentage
percentage = max(0, percentage_delta + current)
speed = "off"
if percentage <= 33:
speed = "low"
elif percentage <= 66:
speed = "medium"
else:
speed = "high"
data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.SecurityPanelController", "Arm"))
async def async_api_arm(hass, config, directive, context):
"""Process a Security Panel Arm request."""
entity = directive.entity
service = None
arm_state = directive.payload["armState"]
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.state != STATE_ALARM_DISARMED:
msg = "You must disarm the system before you can set the requested arm state."
raise AlexaSecurityPanelAuthorizationRequired(msg)
if arm_state == "ARMED_AWAY":
service = SERVICE_ALARM_ARM_AWAY
elif arm_state == "ARMED_NIGHT":
service = SERVICE_ALARM_ARM_NIGHT
elif arm_state == "ARMED_STAY":
service = SERVICE_ALARM_ARM_HOME
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
# return 0 until alarm integration supports an exit delay
payload = {"exitDelayInSeconds": 0}
response = directive.response(
name="Arm.Response", namespace="Alexa.SecurityPanelController", payload=payload
)
response.add_context_property(
{
"name": "armState",
"namespace": "Alexa.SecurityPanelController",
"value": arm_state,
}
)
return response
@HANDLERS.register(("Alexa.SecurityPanelController", "Disarm"))
async def async_api_disarm(hass, config, directive, context):
"""Process a Security Panel Disarm request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
response = directive.response()
# Per Alexa Documentation: If you receive a Disarm directive, and the system is already disarmed,
# respond with a success response, not an error response.
if entity.state == STATE_ALARM_DISARMED:
return response
payload = directive.payload
if "authorization" in payload:
value = payload["authorization"]["value"]
if payload["authorization"]["type"] == "FOUR_DIGIT_PIN":
data["code"] = value
if not await hass.services.async_call(
entity.domain, SERVICE_ALARM_DISARM, data, blocking=True, context=context
):
msg = "Invalid Code"
raise AlexaSecurityPanelUnauthorizedError(msg)
response.add_context_property(
{
"name": "armState",
"namespace": "Alexa.SecurityPanelController",
"value": "DISARMED",
}
)
return response
@HANDLERS.register(("Alexa.ModeController", "SetMode"))
async def async_api_set_mode(hass, config, directive, context):
"""Process a SetMode directive."""
entity = directive.entity
instance = directive.instance
domain = entity.domain
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
mode = directive.payload["mode"]
# Fan Direction
if instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
_, direction = mode.split(".")
if direction in (fan.DIRECTION_REVERSE, fan.DIRECTION_FORWARD):
service = fan.SERVICE_SET_DIRECTION
data[fan.ATTR_DIRECTION] = direction
# Cover Position
elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
_, position = mode.split(".")
if position == cover.STATE_CLOSED:
service = cover.SERVICE_CLOSE_COVER
elif position == cover.STATE_OPEN:
service = cover.SERVICE_OPEN_COVER
elif position == "custom":
service = cover.SERVICE_STOP_COVER
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
domain, service, data, blocking=False, context=context
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.ModeController",
"instance": instance,
"name": "mode",
"value": mode,
}
)
return response
@HANDLERS.register(("Alexa.ModeController", "AdjustMode"))
async def async_api_adjust_mode(hass, config, directive, context):
    """Process an AdjustMode request.
Requires capabilityResources supportedModes to be ordered.
Only supportedModes with ordered=True support the adjustMode directive.
"""
# Currently no supportedModes are configured with ordered=True to support this request.
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
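# Illustrative only (not from the original source): AdjustMode would require a
# ModeController capability whose discovery configuration declares ordered modes,
# roughly like the hypothetical fragment below. Nothing exposed here sets
# ordered=true, hence the directive is rejected above.
#
#     "configuration": {
#         "ordered": true,
#         "supportedModes": [
#             {"value": "position.one"},
#             {"value": "position.two"},
#             {"value": "position.three"},
#         ],
#     }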
@HANDLERS.register(("Alexa.ToggleController", "TurnOn"))
async def async_api_toggle_on(hass, config, directive, context):
"""Process a toggle on request."""
entity = directive.entity
instance = directive.instance
domain = entity.domain
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
# Fan Oscillating
if instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
service = fan.SERVICE_OSCILLATE
data[fan.ATTR_OSCILLATING] = True
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
domain, service, data, blocking=False, context=context
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.ToggleController",
"instance": instance,
"name": "toggleState",
"value": "ON",
}
)
return response
@HANDLERS.register(("Alexa.ToggleController", "TurnOff"))
async def async_api_toggle_off(hass, config, directive, context):
"""Process a toggle off request."""
entity = directive.entity
instance = directive.instance
domain = entity.domain
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
# Fan Oscillating
if instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
service = fan.SERVICE_OSCILLATE
data[fan.ATTR_OSCILLATING] = False
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
domain, service, data, blocking=False, context=context
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.ToggleController",
"instance": instance,
"name": "toggleState",
"value": "OFF",
}
)
return response
@HANDLERS.register(("Alexa.RangeController", "SetRangeValue"))
async def async_api_set_range(hass, config, directive, context):
    """Process a SetRangeValue request."""
entity = directive.entity
instance = directive.instance
domain = entity.domain
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
range_value = directive.payload["rangeValue"]
# Fan Speed
if instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
range_value = int(range_value)
service = fan.SERVICE_SET_SPEED
speed_list = entity.attributes[fan.ATTR_SPEED_LIST]
speed = next((v for i, v in enumerate(speed_list) if i == range_value), None)
if not speed:
msg = "Entity does not support value"
raise AlexaInvalidValueError(msg)
if speed == fan.SPEED_OFF:
service = fan.SERVICE_TURN_OFF
data[fan.ATTR_SPEED] = speed
# Cover Position
elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
range_value = int(range_value)
if range_value == 0:
service = cover.SERVICE_CLOSE_COVER
elif range_value == 100:
service = cover.SERVICE_OPEN_COVER
else:
service = cover.SERVICE_SET_COVER_POSITION
data[cover.ATTR_POSITION] = range_value
# Cover Tilt
elif instance == f"{cover.DOMAIN}.tilt":
range_value = int(range_value)
if range_value == 0:
service = cover.SERVICE_CLOSE_COVER_TILT
elif range_value == 100:
service = cover.SERVICE_OPEN_COVER_TILT
else:
service = cover.SERVICE_SET_COVER_TILT_POSITION
data[cover.ATTR_TILT_POSITION] = range_value
# Input Number Value
elif instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
range_value = float(range_value)
service = input_number.SERVICE_SET_VALUE
min_value = float(entity.attributes[input_number.ATTR_MIN])
max_value = float(entity.attributes[input_number.ATTR_MAX])
data[input_number.ATTR_VALUE] = min(max_value, max(min_value, range_value))
# Vacuum Fan Speed
elif instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
service = vacuum.SERVICE_SET_FAN_SPEED
speed_list = entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
speed = next(
(v for i, v in enumerate(speed_list) if i == int(range_value)), None
)
if not speed:
msg = "Entity does not support value"
raise AlexaInvalidValueError(msg)
data[vacuum.ATTR_FAN_SPEED] = speed
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
domain, service, data, blocking=False, context=context
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.RangeController",
"instance": instance,
"name": "rangeValue",
"value": range_value,
}
)
return response
@HANDLERS.register(("Alexa.RangeController", "AdjustRangeValue"))
async def async_api_adjust_range(hass, config, directive, context):
    """Process an AdjustRangeValue request."""
entity = directive.entity
instance = directive.instance
domain = entity.domain
service = None
data = {ATTR_ENTITY_ID: entity.entity_id}
range_delta = directive.payload["rangeValueDelta"]
range_delta_default = bool(directive.payload["rangeValueDeltaDefault"])
response_value = 0
# Fan Speed
if instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
range_delta = int(range_delta)
service = fan.SERVICE_SET_SPEED
speed_list = entity.attributes[fan.ATTR_SPEED_LIST]
current_speed = entity.attributes[fan.ATTR_SPEED]
current_speed_index = next(
(i for i, v in enumerate(speed_list) if v == current_speed), 0
)
new_speed_index = min(
len(speed_list) - 1, max(0, current_speed_index + range_delta)
)
speed = next(
(v for i, v in enumerate(speed_list) if i == new_speed_index), None
)
if speed == fan.SPEED_OFF:
service = fan.SERVICE_TURN_OFF
data[fan.ATTR_SPEED] = response_value = speed
# Cover Position
elif instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
range_delta = int(range_delta * 20) if range_delta_default else int(range_delta)
service = SERVICE_SET_COVER_POSITION
current = entity.attributes.get(cover.ATTR_POSITION)
if not current:
msg = f"Unable to determine {entity.entity_id} current position"
raise AlexaInvalidValueError(msg)
position = response_value = min(100, max(0, range_delta + current))
if position == 100:
service = cover.SERVICE_OPEN_COVER
elif position == 0:
service = cover.SERVICE_CLOSE_COVER
else:
data[cover.ATTR_POSITION] = position
# Cover Tilt
elif instance == f"{cover.DOMAIN}.tilt":
range_delta = int(range_delta * 20) if range_delta_default else int(range_delta)
service = SERVICE_SET_COVER_TILT_POSITION
current = entity.attributes.get(cover.ATTR_TILT_POSITION)
if not current:
msg = f"Unable to determine {entity.entity_id} current tilt position"
raise AlexaInvalidValueError(msg)
tilt_position = response_value = min(100, max(0, range_delta + current))
if tilt_position == 100:
service = cover.SERVICE_OPEN_COVER_TILT
elif tilt_position == 0:
service = cover.SERVICE_CLOSE_COVER_TILT
else:
data[cover.ATTR_TILT_POSITION] = tilt_position
# Input Number Value
elif instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
range_delta = float(range_delta)
service = input_number.SERVICE_SET_VALUE
min_value = float(entity.attributes[input_number.ATTR_MIN])
max_value = float(entity.attributes[input_number.ATTR_MAX])
current = float(entity.state)
data[input_number.ATTR_VALUE] = response_value = min(
max_value, max(min_value, range_delta + current)
)
# Vacuum Fan Speed
elif instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
range_delta = int(range_delta)
service = vacuum.SERVICE_SET_FAN_SPEED
speed_list = entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
current_speed = entity.attributes[vacuum.ATTR_FAN_SPEED]
current_speed_index = next(
(i for i, v in enumerate(speed_list) if v == current_speed), 0
)
new_speed_index = min(
len(speed_list) - 1, max(0, current_speed_index + range_delta)
)
speed = next(
(v for i, v in enumerate(speed_list) if i == new_speed_index), None
)
data[vacuum.ATTR_FAN_SPEED] = response_value = speed
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
domain, service, data, blocking=False, context=context
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.RangeController",
"instance": instance,
"name": "rangeValue",
"value": response_value,
}
)
return response
@HANDLERS.register(("Alexa.ChannelController", "ChangeChannel"))
async def async_api_changechannel(hass, config, directive, context):
"""Process a change channel request."""
channel = "0"
entity = directive.entity
channel_payload = directive.payload["channel"]
metadata_payload = directive.payload["channelMetadata"]
payload_name = "number"
if "number" in channel_payload:
channel = channel_payload["number"]
payload_name = "number"
elif "callSign" in channel_payload:
channel = channel_payload["callSign"]
payload_name = "callSign"
elif "affiliateCallSign" in channel_payload:
channel = channel_payload["affiliateCallSign"]
payload_name = "affiliateCallSign"
elif "uri" in channel_payload:
channel = channel_payload["uri"]
payload_name = "uri"
elif "name" in metadata_payload:
channel = metadata_payload["name"]
payload_name = "callSign"
data = {
ATTR_ENTITY_ID: entity.entity_id,
media_player.const.ATTR_MEDIA_CONTENT_ID: channel,
media_player.const.ATTR_MEDIA_CONTENT_TYPE: media_player.const.MEDIA_TYPE_CHANNEL,
}
await hass.services.async_call(
entity.domain,
media_player.const.SERVICE_PLAY_MEDIA,
data,
blocking=False,
context=context,
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.ChannelController",
"name": "channel",
"value": {payload_name: channel},
}
)
return response
@HANDLERS.register(("Alexa.ChannelController", "SkipChannels"))
async def async_api_skipchannel(hass, config, directive, context):
"""Process a skipchannel request."""
channel = int(directive.payload["channelCount"])
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
if channel < 0:
service_media = SERVICE_MEDIA_PREVIOUS_TRACK
else:
service_media = SERVICE_MEDIA_NEXT_TRACK
for _ in range(abs(channel)):
await hass.services.async_call(
entity.domain, service_media, data, blocking=False, context=context
)
response = directive.response()
response.add_context_property(
{
"namespace": "Alexa.ChannelController",
"name": "channel",
"value": {"number": ""},
}
)
return response
@HANDLERS.register(("Alexa.SeekController", "AdjustSeekPosition"))
async def async_api_seek(hass, config, directive, context):
"""Process a seek request."""
entity = directive.entity
position_delta = int(directive.payload["deltaPositionMilliseconds"])
current_position = entity.attributes.get(media_player.ATTR_MEDIA_POSITION)
if not current_position:
msg = f"{entity} did not return the current media position."
raise AlexaVideoActionNotPermittedForContentError(msg)
seek_position = int(current_position) + int(position_delta / 1000)
if seek_position < 0:
seek_position = 0
media_duration = entity.attributes.get(media_player.ATTR_MEDIA_DURATION)
if media_duration and 0 < int(media_duration) < seek_position:
seek_position = media_duration
data = {
ATTR_ENTITY_ID: entity.entity_id,
media_player.ATTR_MEDIA_SEEK_POSITION: seek_position,
}
await hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_MEDIA_SEEK,
data,
blocking=False,
context=context,
)
# convert seconds to milliseconds for StateReport.
seek_position = int(seek_position * 1000)
payload = {"properties": [{"name": "positionMilliseconds", "value": seek_position}]}
return directive.response(
name="StateReport", namespace="Alexa.SeekController", payload=payload
)
@HANDLERS.register(("Alexa.EqualizerController", "SetMode"))
async def async_api_set_eq_mode(hass, config, directive, context):
"""Process a SetMode request for EqualizerController."""
mode = directive.payload["mode"]
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
sound_mode_list = entity.attributes.get(media_player.const.ATTR_SOUND_MODE_LIST)
if sound_mode_list and mode.lower() in sound_mode_list:
data[media_player.const.ATTR_SOUND_MODE] = mode.lower()
else:
msg = f"failed to map sound mode {mode} to a mode on {entity.entity_id}"
raise AlexaInvalidValueError(msg)
await hass.services.async_call(
entity.domain,
media_player.SERVICE_SELECT_SOUND_MODE,
data,
blocking=False,
context=context,
)
return directive.response()
@HANDLERS.register(("Alexa.EqualizerController", "AdjustBands"))
@HANDLERS.register(("Alexa.EqualizerController", "ResetBands"))
@HANDLERS.register(("Alexa.EqualizerController", "SetBands"))
async def async_api_bands_directive(hass, config, directive, context):
"""Handle an AdjustBands, ResetBands, SetBands request.
Only mode directives are currently supported for the EqualizerController.
"""
# Currently bands directives are not supported.
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
@HANDLERS.register(("Alexa.TimeHoldController", "Hold"))
async def async_api_hold(hass, config, directive, context):
"""Process a TimeHoldController Hold request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.domain == timer.DOMAIN:
service = timer.SERVICE_PAUSE
elif entity.domain == vacuum.DOMAIN:
service = vacuum.SERVICE_START_PAUSE
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.TimeHoldController", "Resume"))
async def async_api_resume(hass, config, directive, context):
"""Process a TimeHoldController Resume request."""
entity = directive.entity
data = {ATTR_ENTITY_ID: entity.entity_id}
if entity.domain == timer.DOMAIN:
service = timer.SERVICE_START
elif entity.domain == vacuum.DOMAIN:
service = vacuum.SERVICE_START_PAUSE
else:
msg = "Entity does not support directive"
raise AlexaInvalidDirectiveError(msg)
await hass.services.async_call(
entity.domain, service, data, blocking=False, context=context
)
return directive.response()
@HANDLERS.register(("Alexa.CameraStreamController", "InitializeCameraStreams"))
async def async_api_initialize_camera_stream(hass, config, directive, context):
    """Process an InitializeCameraStreams request."""
entity = directive.entity
stream_source = await camera.async_request_stream(hass, entity.entity_id, fmt="hls")
camera_image = hass.states.get(entity.entity_id).attributes[ATTR_ENTITY_PICTURE]
try:
external_url = network.get_url(
hass,
allow_internal=False,
allow_ip=False,
require_ssl=True,
require_standard_port=True,
)
except network.NoURLAvailableError as err:
raise AlexaInvalidValueError(
"Failed to find suitable URL to serve to Alexa"
) from err
payload = {
"cameraStreams": [
{
"uri": f"{external_url}{stream_source}",
"protocol": "HLS",
"resolution": {"width": 1280, "height": 720},
"authorizationType": "NONE",
"videoCodec": "H264",
"audioCodec": "AAC",
}
],
"imageUri": f"{external_url}{camera_image}",
}
return directive.response(
name="Response", namespace="Alexa.CameraStreamController", payload=payload
)
|
|
from .common import *
import av
from fractions import Fraction
from av.buffer import Buffer
from av.packet import Packet
from av.audio.resampler import AudioResampler
def iter_frames(container, stream):
for packet in container.demux(stream):
for frame in packet.decode():
yield frame
def iter_raw_frames(path, packet_sizes, decoder):
with open(path, 'rb') as f:
for size in packet_sizes:
packet = Packet(size)
read_size = f.readinto(packet)
if not read_size:
break
for frame in decoder.decode(packet):
yield frame
for frame in decoder.flush():
yield frame
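# The helpers above are used by the tests below, roughly as follows (illustrative
# only; 'input.mp4' is a made-up path):
#
#     container = av.open('input.mp4')
#     stream = next(s for s in container.streams if s.type == 'video')
#     for frame in iter_frames(container, stream):
#         ...  # decoded frames, in demux order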
class TestCoders(TestCase):
def test_encoding_png(self):
self.image_sequence_encode('png')
def test_encoding_mjpeg(self):
self.image_sequence_encode('mjpeg')
def test_encoding_tiff(self):
self.image_sequence_encode('tiff')
def image_sequence_encode(self, codec):
        if codec not in av.codec.codecs_availible:
raise SkipTest()
container = av.open(fate_suite('h264/interlaced_crop.mp4'))
video_stream = next(s for s in container.streams if s.type == 'video')
width = 640
height = 480
encoder = av.Encoder(codec)
pix_fmt = encoder.codec.video_formats[0].name
encoder.width = width
encoder.height = height
encoder.time_base = Fraction(24000, 1001)
encoder.pix_fmt = pix_fmt
encoder.open()
frame_count = 1
path_list = []
for frame in iter_frames(container, video_stream):
new_frame = frame.reformat(width, height, pix_fmt)
for i, new_packet in enumerate(encoder.encode(new_frame)):
path = self.sandboxed('%s/encoder.%04d.%s' % (codec,
frame_count,
codec if codec != 'mjpeg' else 'jpg'))
path_list.append(path)
with open(path, 'wb') as f:
f.write(new_packet)
frame_count += 1
if frame_count > 5:
break
decoder = av.Decoder(codec)
decoder.open()
for path in path_list:
with open(path, 'rb') as f:
size = os.fstat(f.fileno()).st_size
packet = Packet(size)
size = f.readinto(packet)
frame = next(decoder.decode(packet))
self.assertEqual(frame.width, width)
self.assertEqual(frame.height, height)
self.assertEqual(frame.format.name, pix_fmt)
def test_encoding_h264(self):
self.video_encoding('libx264', {'crf':'19'})
def test_encoding_mpeg4(self):
self.video_encoding('mpeg4')
def test_encoding_mpeg1video(self):
self.video_encoding('mpeg1video')
def test_encoding_dvvideo(self):
options = {'pix_fmt':'yuv411p',
'width':720,
'height':480}
self.video_encoding('dvvideo', options)
def test_encoding_dnxhd(self):
options = {'b':'90M', #bitrate
'pix_fmt':'yuv422p',
'width': 1920,
'height': 1080,
'time_base': Fraction(30000, 1001),
'max_frames': 5}
self.video_encoding('dnxhd', options)
    def video_encoding(self, codec, options=None):
        # avoid a shared mutable default; copy so the caller's dict is not modified
        options = dict(options or {})
        if codec not in av.codec.codecs_availible:
raise SkipTest()
container = av.open(fate_suite('h264/interlaced_crop.mp4'))
video_stream = next(s for s in container.streams if s.type == 'video')
pix_fmt = options.get('pix_fmt', 'yuv420p')
width = options.get('width', 640)
height = options.get('height', 480)
max_frames = options.get('max_frames', 1000)
time_base = options.get('time_base', Fraction(24000, 1001))
for key in ('pix_fmt', 'width', 'height', 'max_frames', 'time_base'):
if key in options:
del options[key]
encoder = av.Encoder(codec)
encoder.width = width
encoder.height = height
encoder.time_base = time_base
encoder.pix_fmt = pix_fmt
encoder.options = options
encoder.open()
path = self.sandboxed('encoder.%s' % codec)
packet_sizes = []
frame_count = 0
test_bad = True
with open(path, 'wb') as f:
for frame in iter_frames(container, video_stream):
if frame_count > max_frames:
break
if test_bad:
bad_frame = frame.reformat(width, 100, pix_fmt)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
bad_frame = frame.reformat(100, height, pix_fmt)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
bad_frame = frame.reformat(width, height, "rgb24")
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
test_bad = False
new_frame = frame.reformat(width, height, pix_fmt)
for new_packet in encoder.encode(new_frame):
packet_sizes.append(new_packet.size)
f.write(new_packet)
frame_count += 1
for new_packet in encoder.flush():
packet_sizes.append(new_packet.size)
f.write(new_packet)
dec_codec = codec
if codec == 'libx264':
dec_codec = 'h264'
decoder = av.Decoder(dec_codec)
decoder.open()
decoded_frame_count = 0
for frame in iter_raw_frames(path, packet_sizes, decoder):
decoded_frame_count += 1
self.assertEqual(frame.width, width)
self.assertEqual(frame.height, height)
self.assertEqual(frame.format.name, pix_fmt)
self.assertEqual(decoded_frame_count, frame_count)
def test_encoding_pcm_s24le(self):
self.audio_encoding('pcm_s24le')
def test_encoding_aac(self):
self.audio_encoding('aac')
def test_encoding_mp2(self):
self.audio_encoding('mp2')
def audio_encoding(self, codec):
        if codec not in av.codec.codecs_availible:
raise SkipTest()
encoder = av.Encoder(codec)
if encoder.codec.experimental:
raise SkipTest()
sample_fmt = encoder.codec.audio_formats[-1].name
sample_rate = 48000
channel_layout = "stereo"
channels = 2
encoder.time_base = sample_rate
encoder.sample_rate = sample_rate
encoder.sample_fmt = sample_fmt
encoder.channels = channels
encoder.open()
resampler = AudioResampler(sample_fmt, channel_layout, sample_rate)
container = av.open(fate_suite('audio-reference/chorusnoise_2ch_44kHz_s16.wav'))
audio_stream = next(s for s in container.streams if s.type == 'audio')
path = self.sandboxed('encoder.%s' % codec)
samples = 0
packet_sizes = []
test_bad = True
        with open(path, 'wb') as f:
for frame in iter_frames(container, audio_stream):
if test_bad:
bad_resampler = AudioResampler(sample_fmt, "mono", sample_rate)
bad_frame = bad_resampler.resample(frame)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
bad_resampler = AudioResampler(sample_fmt, channel_layout, 3000)
bad_frame = bad_resampler.resample(frame)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
bad_resampler = AudioResampler('u8', channel_layout, 3000)
bad_frame = bad_resampler.resample(frame)
with self.assertRaises(ValueError):
next(encoder.encode(bad_frame))
test_bad = False
resampled_frame = resampler.resample(frame)
samples += resampled_frame.samples
for new_packet in encoder.encode(resampled_frame):
                    # wrap in bytearray because Python can
                    # freak out if the first byte is NULL
f.write(bytearray(new_packet))
packet_sizes.append(new_packet.size)
for new_packet in encoder.flush():
packet_sizes.append(new_packet.size)
f.write(bytearray(new_packet))
decoder = av.Decoder(codec)
decoder.time_base = sample_rate
decoder.sample_rate = sample_rate
decoder.sample_fmt = sample_fmt
decoder.channels = channels
decoder.open()
result_samples = 0
        # should have more asserts but not sure what to check;
        # libav and ffmpeg give different results,
        # so we can't really use checksums
for frame in iter_raw_frames(path, packet_sizes, decoder):
result_samples += frame.samples
self.assertEqual(frame.rate, sample_rate)
self.assertEqual(len(frame.layout.channels), channels)
# import logging
# logging.basicConfig()
|
|
# Copyright (c) The University of Edinburgh 2014-2015
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The dispel4py workflow graph.
'''
import networkx as nx
import sys
from dispel4py.core import GenericPE
class WorkflowNode:
'''
Wrapper class for workflow nodes - wraps around general subclasses
of classes denoting PEs, that is GenericPEs.
'''
# Supported types of workflow nodes:
WORKFLOW_NODE_PE = 0
WORKFLOW_NODE_FN = 1
WORKFLOW_NODE_CP = 2
node_counter = 0
def __init__(self, o):
self.obj = o
self.outputs = []
self.inputs = []
if isinstance(o, GenericPE):
o.id = o.name + str(WorkflowNode.node_counter)
WorkflowNode.node_counter += 1
self.nodeType = self.WORKFLOW_NODE_PE
for i in o.inputconnections.values():
# empty for the time being - only the index matters
self.inputs.append({})
for i in o.outputconnections.values():
self.outputs.append({})
elif isinstance(o, WorkflowGraph):
self.nodeType = self.WORKFLOW_NODE_CP
try:
for i in o.inputmappings:
self.inputs.append({})
except AttributeError:
pass
try:
for i in o.outputmappings:
self.outputs.append({})
except AttributeError:
pass
else:
            sys.stderr.write('Error: Unknown type of object passed as a '
                             'Workflow Node: %s\n' % type(o))
            raise Exception('Unknown type of object passed as a '
                            'Workflow Node: %s' % type(o))
def getContainedObject(self):
''' Returns the wrapped PE or function. '''
return self.obj
# Used as attribute names
FROM_CONNECTION = 'from_connection'
TO_CONNECTION = 'to_connection'
DIRECTION = 'direction'
class WorkflowGraph(object):
"""
A graph representing the workflow and related methods
"""
def __init__(self):
self.graph = nx.Graph()
self.objToNode = {}
def add(self, n):
'''
Adds node n, which must be an instance of
:py:class:`dispel4py.core.GenericPE`, and returns the created workflow
node.
:rtype: WorkflowNode
'''
nd = WorkflowNode(n)
self.graph.add_node(nd)
self.objToNode[n] = nd
return nd
def connect(self, fromNode, fromConnection, toNode, toConnection):
'''
Connect the two given nodes from the given output to the given input.
If the nodes are not in the graph, they will be added.
:param fromNode: the source PE of the connection
:param fromConnection: the name of the output of the source node
'fromNode'
:type fromConnection: String
:param toNode: the destination PE of the connection
:param toConnection: the name of the input of the destination node
'toNode'
:type toConnection: String
'''
if fromNode not in self.objToNode:
self.add(fromNode)
if toNode not in self.objToNode:
self.add(toNode)
fromWfNode = self.objToNode[fromNode]
toWfNode = self.objToNode[toNode]
if self.graph.has_edge(fromWfNode, toWfNode):
self.graph[fromWfNode][toWfNode]['ALL_CONNECTIONS']\
.append((fromConnection, toConnection))
else:
self.graph.add_edge(fromWfNode, toWfNode,
**{'FROM_CONNECTION': fromConnection,
'TO_CONNECTION': toConnection,
'DIRECTION': (fromNode, toNode),
'ALL_CONNECTIONS': [
(fromConnection, toConnection)]})
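    # A minimal usage sketch (not part of dispel4py itself); ProducerPE and
    # ConsumerPE stand for hypothetical GenericPE subclasses with an 'output'
    # and an 'input' connection respectively:
    #
    #     graph = WorkflowGraph()
    #     producer, consumer = ProducerPE(), ConsumerPE()
    #     graph.connect(producer, 'output', consumer, 'input')
    #
    # connect() adds both PEs to the graph automatically, so an explicit add()
    # call is only needed for nodes that are never connected.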
def getContainedObjects(self):
nodes = [node.getContainedObject() for node in self.graph.nodes()]
return sorted(nodes, key=lambda x: x.id)
def propagate_types(self):
'''
Propagates the types throughout the graph by retrieving the output
types from each node, starting from the root, and providing them to
connected consumers.
'''
visited = set()
for node in self.graph.nodes():
if node not in visited:
self.__assign_types(node, visited)
def __assign_types(self, node, visited):
pe = node.getContainedObject()
inputTypes = {}
for edge in self.graph[node].values():
if pe == edge['DIRECTION'][1]:
# pe is destination so look up the types produced by sources
source = edge['DIRECTION'][0]
sourceNode = self.objToNode[source]
if sourceNode not in visited:
self.__assign_types(sourceNode, visited)
inType = source.getOutputTypes()[edge['FROM_CONNECTION']]
inputTypes[edge['TO_CONNECTION']] = inType
pe.setInputTypes(inputTypes)
visited.add(node)
# print "%s: Assigned inputs = %s, \
# received outputs = %s" % \
# (pe.__class__.__name__, inputTypes, pe.getOutputTypes())
def flatten(self):
'''
Subgraphs contained within composite PEs are added to the top level
workflow.
'''
hasComposites = True
toRemove = set()
while hasComposites:
hasComposites = False
toRemove = set()
for node in self.graph.nodes():
if node.nodeType == WorkflowNode.WORKFLOW_NODE_CP:
hasComposites = True
toRemove.add(node)
wfGraph = node.getContainedObject()
subgraph = wfGraph.graph
self.graph.add_nodes_from(subgraph.nodes(data=True))
self.graph.add_edges_from(subgraph.edges(data=True))
self.objToNode.update(wfGraph.objToNode)
for inputname in wfGraph.inputmappings:
toPE, toConnection = wfGraph.inputmappings[inputname]
edge = None
fromPE, fromConnection = None, None
for e in self.graph[node].values():
if wfGraph == e['DIRECTION'][1] \
and inputname == e['TO_CONNECTION']:
fromPE = e['DIRECTION'][0]
fromConnection = e['FROM_CONNECTION']
edge = self.objToNode[fromPE],\
self.objToNode[wfGraph]
break
if edge is not None:
self.connect(fromPE, fromConnection,
toPE, toConnection)
for outputname in wfGraph.outputmappings:
fromPE, fromConnection = \
wfGraph.outputmappings[outputname]
destinations = []
for e in self.graph[node].values():
if wfGraph == e['DIRECTION'][0] \
and outputname == e['FROM_CONNECTION']:
toPE = e['DIRECTION'][1]
toConnection = e['TO_CONNECTION']
destinations.append((toPE, toConnection))
for (toPE, toConnection) in destinations:
# print 'connecting output %s.%s' %\
# (toPE, toConnection)
self.connect(fromPE, fromConnection,
toPE, toConnection)
self.graph.remove_nodes_from(toRemove)
def _create_dot(graph, instanceNames={}, counter=0):
dot = ''
# assign unique names
for node in graph.graph.nodes():
try:
name = node.getContainedObject().id, counter
        except AttributeError:
name = node.getContainedObject().__class__.__name__, counter
instanceNames[node] = name
counter += 1
# now add all the nodes and their input and output connections
cluster_index = 0
for node in graph.graph.nodes():
pe = node.getContainedObject()
if isinstance(pe, WorkflowGraph):
dot += _create_cluster(pe, cluster_index, instanceNames, counter)
cluster_index += 1
continue
name, index = instanceNames[node]
dot += name + str(index) + "[label=\"{ "
# add inputs
inputNames = []
outputNames = []
for edge in graph.graph[node].values():
if pe == edge['DIRECTION'][1]:
inputName = edge['TO_CONNECTION']
dotName = "<in_" + inputName + ">" + inputName
if dotName not in inputNames:
inputNames.append(dotName)
else:
outputName = edge['FROM_CONNECTION']
dotName = "<out_" + outputName + ">" + outputName
if dotName not in outputNames:
outputNames.append(dotName)
if inputNames:
dot += '{' + ' | '.join(inputNames) + '} | '
dot += name
if outputNames:
dot += ' | {' + ' | '.join(outputNames) + '}'
dot += " }\"];\n"
# connect the inputs and outputs
for node in graph.graph.nodes():
pe = node.getContainedObject()
for edge in graph.graph[node].values():
if pe == edge['DIRECTION'][0]:
if isinstance(pe, WorkflowGraph):
inner_source, source_output = \
pe.outputmappings[edge['FROM_CONNECTION']]
node = pe.objToNode[inner_source]
else:
source_output = edge['FROM_CONNECTION']
# pe is the source so look up the connected destination
dest = edge['DIRECTION'][1]
if isinstance(dest, WorkflowGraph):
inner_dest, dest_input = \
dest.inputmappings[edge['TO_CONNECTION']]
destNode = dest.objToNode[inner_dest]
else:
destNode = graph.objToNode[dest]
dest_input = edge['TO_CONNECTION']
dot += '%s%s' % instanceNames[node] + ':out_' + source_output
dot += ' -> '
dot += '%s%s' % instanceNames[destNode] + ':in_' + dest_input
dot += ';\n'
return dot
def _create_cluster(graph, index, instanceNames, counter):
dot = 'subgraph cluster_%s {\n' % index
try:
# names for composite PEs are optional
dot += 'label = "%s";' % graph.name
    except AttributeError:
pass
dot += 'style=filled;\n'
dot += 'color=lightgrey;\n'
if index % 2:
dot += 'fillcolor=lightgrey;\n'
dot += _create_dot(graph, instanceNames, counter)
dot += '}\n'
return dot
def draw(graph):
'''
Creates a representation of the workflow graph in the dot language.
'''
    dot = ('digraph request\n{\n'
           'node [shape=Mrecord, style=filled, fillcolor=white];\n')
dot += _create_dot(graph)
dot += '}\n'
return dot
def drawDot(graph): # pragma: no cover
'''
Draws the workflow as a graph and creates a PNG image using graphviz dot.
'''
from subprocess import Popen, PIPE
dot = draw(graph)
nodelist = graph.getContainedObjects()
#p = Popen(['dot', '-T', 'svg','-o','dot.svg'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
p = Popen(['dot', '-T', 'png'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(dot.encode('utf-8'))
return stdout
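# Small usage sketch (not part of the original module): render a populated
# WorkflowGraph to a PNG on disk; graphviz 'dot' must be on the PATH.
#
#     png_bytes = drawDot(graph)
#     with open('workflow.png', 'wb') as f:
#         f.write(png_bytes)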
|
|
"""
Load mean geopotential heights and plot in colour
"""
import os, sys
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from mpl_toolkits.basemap import Basemap
import iris
import iris.analysis.cartography
import numpy as np
import imp
import h5py
import cartopy.crs as ccrs
import scipy.interpolate
from textwrap import wrap
model_name_convert_title = imp.load_source('util', '/nfs/a90/eepdw/python_scripts/model_name_convert_title.py')
def main():
# Plot diagnostics, model and pressure levels etc. to plot on for looping through
plot_type='mean'
plot_diags=['temp', 'sp_hum']
plot_levels = [925, 850, 700, 500]
#plot_levels = [925]
#experiment_ids = ['djznq', 'djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
experiment_ids = ['dkjxq']
#experiment_id = 'djzny'
p_levels = [1000, 950, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20, 10]
###### Unrotate global model data ##############################
######### Regrid to global, and difference #######
############################################################################
## Load global wind and orography
fw_global = '/nfs/a90/eepdw/Mean_State_Plot_Data/pp_files/djzn/djzny/30201_mean.pp'
fo_global = '/nfs/a90/eepdw/Mean_State_Plot_Data/pp_files/djzn/djzny/33.pp'
u_global,v_global = iris.load(fw_global)
oro_global = iris.load_cube(fo_global)
# Unrotate global coordinates
cs_glob = u_global.coord_system('CoordSystem')
cs_glob_v = v_global.coord_system('CoordSystem')
cs_glob_oro = oro_global.coord_system('CoordSystem')
lat_g = u_global.coord('grid_latitude').points
lon_g = u_global.coord('grid_longitude').points
lat_g_oro = oro_global.coord('grid_latitude').points
lon_g_oro = oro_global.coord('grid_longitude').points
if cs_glob!=cs_glob_v:
print 'Global model u and v winds have different poles of rotation'
# Unrotate global winds
if isinstance(cs_glob, iris.coord_systems.RotatedGeogCS):
print ' Global Model - Winds - djzny - Unrotate pole %s' % cs_glob
lons_g, lats_g = np.meshgrid(lon_g, lat_g)
lons_g,lats_g = iris.analysis.cartography.unrotate_pole(lons_g,lats_g, cs_glob.grid_north_pole_longitude, cs_glob.grid_north_pole_latitude)
lon_g=lons_g[0]
lat_g=lats_g[:,0]
for i, coord in enumerate (u_global.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_uglobal = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_uglobal = i
csur_glob=cs_glob.ellipsoid
u_global.remove_coord('grid_latitude')
u_global.remove_coord('grid_longitude')
u_global.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_uglobal)
u_global.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_uglobal)
#print u_global
v_global.remove_coord('grid_latitude')
v_global.remove_coord('grid_longitude')
v_global.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_uglobal)
v_global.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_uglobal)
#print v_global
# Unrotate global model
if isinstance(cs_glob_oro, iris.coord_systems.RotatedGeogCS):
print ' Global Model - Orography - djzny - Unrotate pole %s - Winds and other diagnostics may have different number of grid points' % cs_glob_oro
lons_go, lats_go = np.meshgrid(lon_g_oro, lat_g_oro)
lons_go,lats_go = iris.analysis.cartography.unrotate_pole(lons_go,lats_go, cs_glob_oro.grid_north_pole_longitude, cs_glob_oro.grid_north_pole_latitude)
lon_g_oro=lons_go[0]
lat_g_oro=lats_go[:,0]
for i, coord in enumerate (oro_global.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_og = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_og = i
csur_glob_oro=cs_glob_oro.ellipsoid
oro_global.remove_coord('grid_latitude')
oro_global.remove_coord('grid_longitude')
oro_global.add_dim_coord(iris.coords.DimCoord(points=lat_g_oro, standard_name='grid_latitude', units='degrees', coord_system=csur_glob_oro), lat_dim_coord_og)
oro_global.add_dim_coord(iris.coords.DimCoord(points=lon_g_oro, standard_name='grid_longitude', units='degrees', coord_system=csur_glob_oro), lon_dim_coord_og)
###############################################################################
#################### Load global heights and temp/sp_hum #####################
f_glob_h = '/nfs/a90/eepdw/Mean_State_Plot_Data/Mean_Heights_Temps_etc/408_pressure_levels_interp_pressure_djzny_%s' % (plot_type)
######################################################################################
with h5py.File(f_glob_h, 'r') as i:
mh = i['%s' % plot_type]
        mean_heights_global = mh[...]
######################################################################################
## Loop through experiment id's ######################################################
for pl in plot_diags:
plot_diag=pl
f_glob_d = '/nfs/a90/eepdw/Mean_State_Plot_Data/Mean_Heights_Temps_etc/%s_pressure_levels_interp_djzny_%s' % (plot_diag, plot_type)
with h5py.File(f_glob_d, 'r') as i:
mg = i['%s' % plot_type]
            mean_var_global = mg[...]
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
###############################################################################
#################### Load global heights and temp/sp_hum #####################
fname_h = '/nfs/a90/eepdw/Mean_State_Plot_Data/Mean_Heights_Temps_etc/408_pressure_levels_interp_pressure_%s_%s' % (experiment_id, plot_type)
fname_d = '/nfs/a90/eepdw/Mean_State_Plot_Data/Mean_Heights_Temps_etc/%s_pressure_levels_interp_%s_%s' % (plot_diag, experiment_id, plot_type)
# print fname_h
# print fname_d
# Height data file
with h5py.File(fname_h, 'r') as i:
mh = i['%s' % plot_type]
                mean_heights = mh[...]
# print mean_heights.shape
with h5py.File(fname_d, 'r') as i:
mh = i['%s' % plot_type]
                mean_var = mh[...]
# print mean_var.shape
f_oro = '/nfs/a90/eepdw/Mean_State_Plot_Data/pp_files/%s/%s/409.pp' % (expmin1, experiment_id)
oro = iris.load_cube(f_oro)
#print oro
for i, coord in enumerate (oro.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_oro = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_oro = i
fu = '/nfs/a90/eepdw/Mean_State_Plot_Data/pp_files/%s/%s/30201_mean.pp' % (expmin1, experiment_id)
u_wind,v_wind = iris.load(fu)
# Wind may have different number of grid points so need to do unrotate again for wind grid points
lat_w = u_wind.coord('grid_latitude').points
lon_w = u_wind.coord('grid_longitude').points
p_levs = u_wind.coord('pressure').points
lat = oro.coord('grid_latitude').points
lon = oro.coord('grid_longitude').points
cs_w = u_wind.coord_system('CoordSystem')
cs = oro.coord_system('CoordSystem')
if isinstance(cs_w, iris.coord_systems.RotatedGeogCS):
print ' Wind - %s - Unrotate pole %s' % (experiment_id, cs_w)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
csur_w=cs_w.ellipsoid
for i, coord in enumerate (u_wind.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_uwind = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_uwind = i
u_wind.remove_coord('grid_latitude')
u_wind.remove_coord('grid_longitude')
u_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w),lat_dim_coord_uwind )
u_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w), lon_dim_coord_uwind)
v_wind.remove_coord('grid_latitude')
v_wind.remove_coord('grid_longitude')
v_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w), lat_dim_coord_uwind)
v_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w),lon_dim_coord_uwind )
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' 409.pp - %s - Unrotate pole %s' % (experiment_id, cs)
lons, lats = np.meshgrid(lon, lat)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_corner_u,lat_corner_u = iris.analysis.cartography.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
#lon_highu,lat_highu = iris.analysis.cartography.unrotate_pole(lon_high, lat_high, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
for i, coord in enumerate (oro.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_oro = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_oro = i
csur=cs.ellipsoid
oro.remove_coord('grid_latitude')
oro.remove_coord('grid_longitude')
oro.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_oro)
oro.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_oro)
else:
lons, lats = np.meshgrid(lon, lat)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
############## Regrid and Difference #################################
# Regrid Height and Temp/Specific humidity to global grid
h_regrid = np.empty((len(lat_g_oro), len(lon_g_oro)))
v_regrid = np.empty((len(lat_g_oro), len(lon_g_oro)))
for p in plot_levels:
### Search for pressure level match
s = np.searchsorted(p_levels[::-1], p)
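                # Worked example (illustrative check): with p_levels descending as above and
                # p = 850, np.searchsorted(p_levels[::-1], 850) returns 13, so index
                # -(13+1) = -14 picks element 3 of the 17 levels, i.e. the 850 hPa slice,
                # assuming the data's level axis is ordered like p_levels.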
h_regrid = scipy.interpolate.griddata((lats.flatten(),lons.flatten()),mean_heights[:,:,-(s+1)].flatten() , (lats_go,lons_go),method='linear')
v_regrid = scipy.interpolate.griddata((lats.flatten(),lons.flatten()),mean_var[:,:,-(s+1)].flatten() , (lats_go,lons_go),method='linear')
# Difference heights
plt_h = np.where(np.isnan(h_regrid), np.nan, h_regrid - mean_heights_global[:,:,-(s+1)])
#Difference temperature/specific humidity
plt_v = np.where(np.isnan(v_regrid), np.nan, v_regrid - mean_var_global[:,:,-(s+1)])
# Set u,v for winds, linear interpolate to approx. 2 degree grid
sc = np.searchsorted(p_levs, p)
##### Does not work on iris1.0 as on Leeds computers. Does work on later versions
#u_interp = u_wind[sc,:,:]
#v_interp = v_wind[sc,:,:].
#sample_points = [('grid_latitude', np.arange(lat_low,lat_high,2)), ('grid_longitude', np.arange(lon_low,lon_high,2))]
#u = iris.analysis.interpolate.linear(u_interp, sample_points, extrapolation_mode='linear')
#v = iris.analysis.interpolate.linear(v_interp, sample_points).data
##### Does work on Iris 1.0
# 2 degree lats lon lists for wind regridding on plot
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
### Regrid winds to global, difference, and then regrid to 2 degree spacing
fl_la_lo = (lats_w.flatten(),lons_w.flatten())
# print u_wind[sc,:,:].data.flatten().shape
u_wind_rg_to_glob = scipy.interpolate.griddata(fl_la_lo, u_wind[sc,:,:].data.flatten(), (lats_g, lons_g), method='linear')
v_wind_rg_to_glob = scipy.interpolate.griddata(fl_la_lo, v_wind[sc,:,:].data.flatten(), (lats_g, lons_g), method='linear')
u_w=u_wind_rg_to_glob-u_global[sc,:,:].data
v_w=v_wind_rg_to_glob-v_global[sc,:,:].data
#u_interp = u_wind[sc,:,:].data
#v_interp = v_wind[sc,:,:].data
lons_wi, lats_wi = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
fl_la_lo = (lats_g.flatten(),lons_g.flatten())
u = scipy.interpolate.griddata(fl_la_lo, u_w.flatten(), (lats_wi, lons_wi), method='linear')
v = scipy.interpolate.griddata(fl_la_lo, v_w.flatten(), (lats_wi, lons_wi), method='linear')
#######################################################################################
### Plotting #########################################################################
#m_title = 'Height of %s-hPa level (m)' % (p)
# Set pressure height contour min/max
if p == 925:
clev_min = -24.
clev_max = 24.
elif p == 850:
clev_min = -24.
clev_max = 24.
elif p == 700:
clev_min = -24.
clev_max = 24.
elif p == 500:
clev_min = -24.
clev_max = 24.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p == 925:
clevpt_min = -10.
clevpt_max = 10.
elif p == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.linspace(clev_min, clev_max, num=24)
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#x, y = m(lons, lats)
x,y = m(lons_go,lats_go)
print x.shape
x_w,y_w = m(lons_wi, lats_wi)
fig=plt.figure(figsize=(8,8))
ax = fig.add_axes([0.05,0.05,0.9,0.85], axisbg='#262626')
m.drawcoastlines(color='#262626')
m.drawcountries(color='#262626')
m.drawcoastlines(linewidth=0.5)
#m.fillcontinents(color='#CCFF99')
m.drawparallels(np.arange(-80,81,10),labels=[1,1,0,0])
m.drawmeridians(np.arange(0,360,10),labels=[0,0,0,1])
cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='#262626',linewidths=0.5)
cmap=plt.cm.RdBu_r
#cmap.set_bad('#262626', 1.)
#cmap.set_over('#262626')
#cmap.set_under('#262626')
if plot_diag=='temp':
plt_v = np.ma.masked_outside(plt_v, clevpt_max+20, clevpt_min-20)
cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max), cmap=cmap, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%d')
cbar.set_ticks(np.arange(clevpt_min,clevpt_max+2,2.))
cbar.set_ticklabels(np.arange(clevpt_min,clevpt_max+2,2.))
cbar.set_label('K')
plt.suptitle('Difference from global model (Model - global ) of Height, Potential Temperature and Wind Vectors at %s hPa'% (p), fontsize=10)
elif plot_diag=='sp_hum':
plt_v = np.ma.masked_outside(plt_v, clevsh_max+20, clevsh_min-20)
cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max), cmap=cmap, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f')
cbar.set_label('kg/kg')
plt.suptitle('Difference from global model (Model - Global) of Height, Specific Humidity and Wind Vectors at %s hPa' % (p), fontsize=10)
wind = m.quiver(x_w,y_w, u, v, scale=150,color='#262626' )
qk = plt.quiverkey(wind, 0.1, 0.1, 5, '5 m/s', labelpos='W')
plt.clabel(cs_lin, fontsize=10, fmt='%d', color='black')
#plt.title('%s\n%s' % (m_title, model_name_convert_title.main(experiment_id)), fontsize=10)
plt.title('\n'.join(wrap('%s' % (model_name_convert_title.main(experiment_id)), 80)), fontsize=10)
#plt.show()
if not os.path.exists('/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/%s/%s' % (experiment_id, plot_diag)): os.makedirs('/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/%s/%s' % (experiment_id, plot_diag))
plt.savefig('/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/%s/%s/geop_height_difference_120LAM_%shPa_%s_%s.png' % (experiment_id, plot_diag, p, experiment_id, plot_diag), format='png', bbox_inches='tight')
# Save fig - update dpi if needed for printing
if __name__ == '__main__':
main()
|
|
#! /usr/bin/env python
"""
Copyright [1999-2016] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
"""
# Hack to lazy load scipy only if required
from postgap.DataModel import *
import cPickle as pickle
import collections
import postgap.Globals
import numpy
import os.path
import logging
scipy = None
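# scipy stays None until z_score_from_pvalue() first needs it; the import happens there
# so the module loads without scipy when no p-value conversion is required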
def compute_gwas_posteriors(cluster, associations, populations):
"""
Compute posteriors of GWAS causality for the given cluster
Arg1: GWAS_Cluster
Arg2: [GeneSNP_Association]
Arg3: dict(string => float)
Returntype: GWAS_Cluster
"""
prepped_cluster = compute_cluster_genetic_structure(cluster, associations, populations)
weighted_cluster = postgap.Finemap.compute_gwas_lambdas(prepped_cluster, postgap.Globals.KMAX)
return finemap_gwas_cluster(weighted_cluster)
def compute_cluster_genetic_structure(cluster, associations, population):
'''
Enriches GWAS clusters with z-scores, betas, MAFs and annotations
Arg1: GWAS_Cluster
Arg2: [GeneSNP_Association]
Arg3: dict(string => float)
Returntype: GWAS_Cluster
'''
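# Three cases: every LD SNP already has a GWAS z-score (compute the LD matrix directly),
# a GWAS summary stats file is available (read z-scores from it), or missing z-scores
# are imputed from the GWAS SNPs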
if len(cluster.ld_snps) == len(cluster.gwas_snps):
ld_snps, ld_matrix, z_scores, betas = compute_ld_matrix(
cluster, population)
elif postgap.Globals.GWAS_SUMMARY_STATS_FILE is not None:
ld_snps, ld_matrix, z_scores, betas = extract_z_scores_from_file(
cluster, population)
else:
ld_snps, ld_matrix, z_scores, betas = impute_z_scores(cluster, population)
# if ld_snps has fewer than 10 elements, it is less informative
# TODO: assert len(ld_snps) >= 10, sample_label + ' has less than 10 ld_snps in the cluster'
mafs = extract_snp_mafs(ld_snps, associations, population.lower())
annotations = (extract_snp_annotations(ld_snps, associations) > 0.).astype('float')
assert len(ld_snps) == ld_matrix.shape[0]
assert len(ld_snps) == ld_matrix.shape[1]
return GWAS_Cluster(cluster.gwas_snps, ld_snps, ld_matrix, z_scores, betas, mafs, annotations, None, None)
def extract_snp_mafs(ld_snps, associations, populations):
"""
Produce vector of mafs for the SNPs provided
Arg1: [SNP]
Arg2: [GeneSNP_Association]
Arg3: String
Returntype: Numpy vector
"""
maf_hash = collections.defaultdict(int)
for association in associations:
for evidence in association.regulatory_evidence:
if evidence.info is not None:
if evidence.info['MAFs'] is not None:
if evidence.source == 'VEP_reg' and populations in evidence.info['MAFs']:
maf_hash[association.snp.rsID] = evidence.info['MAFs'][populations]
return numpy.array([float(maf_hash[snp.rsID]) for snp in ld_snps])
def extract_snp_annotations(ld_snps, associations):
"""
Produce array of annotation for the SNPs provided
Arg1: [SNP]
Arg2: [GeneSNP_Association]
Returntype: Numpy 2D array
"""
annotation_hash = collections.defaultdict(lambda: collections.defaultdict(float))
for association in associations:
for evidence in association.cisregulatory_evidence + association.regulatory_evidence:
if evidence.source in ['GTEx']:
continue
else:
annotation_hash[evidence.source][evidence.snp.rsID] = evidence.score
return numpy.array([[annotation_hash[annotation][snp.rsID] for snp in ld_snps] for annotation in sorted(annotation_hash.keys())])
def compute_ld_matrix(cluster, population):
'''
Computes the LD matrix, re-orders SNPs accordingly, and extracts Z-scores from
Cluster data.
Arg1: Cluster
Arg2: population (string)
Returntype: [SNP], numpy.matrix (square LD matrix), numpy.matrix (Z-score vector)
'''
# Compute ld_matrix
ld_snp_ids, ld_matrix = postgap.LD.get_pairwise_ld(cluster.ld_snps, population)
# Update list of LD SNPs
ld_snp_hash = dict((ld_snp.rsID, ld_snp) for index, ld_snp in enumerate(cluster.ld_snps))
ld_snps = [ld_snp_hash[rsID] for rsID in ld_snp_ids]
# Aggregate z_scores into a single vector
z_scores = [0] * len(ld_snps)
betas = [0] * len(ld_snps)
gwas_snp_hash = dict((gwas_snp.snp.rsID, gwas_snp) for gwas_snp in cluster.gwas_snps)
for index, ld_snp in enumerate(ld_snps):
z_scores[index] = gwas_snp_hash[ld_snp.rsID].z_score
betas[index] = gwas_snp_hash[ld_snp.rsID].beta
assert len(ld_snps) == ld_matrix.shape[0]
assert len(ld_snps) == ld_matrix.shape[1]
return ld_snps, ld_matrix, z_scores, betas
def extract_z_scores_from_file(cluster, population):
'''
Extracts Z-scores from summary stats file, computes LD matrix
TODO: It is inefficient to run through a 1GB file once per locus; this should be done only once
Arg1: Cluster
Returntype: [SNP], numpy.matrix (square LD matrix), numpy.matrix (Z-score vector)
'''
ld_snp_hash = dict((ld_snp.rsID, ld_snp)
for index, ld_snp in enumerate(cluster.ld_snps))
## Search the LD SNPs in the GWAS summary stats file and drop from ld_snps those that cannot be found there
proper_gwas_cluster = postgap.GWAS.GWAS_File().create_gwas_cluster_with_pvalues_from_file(gwas_cluster=cluster, gwas_data_file=postgap.Globals.GWAS_SUMMARY_STATS_FILE)
## Extract z_scores for all the ld snps that can be found in the GWAS summary stats file
ld_snp_results = dict((ld_snp.snp, (ld_snp.pvalue, ld_snp.beta)) for index, ld_snp in enumerate(proper_gwas_cluster.ld_snps))
# Select all the found ld snps from ld_snps
found_ld_snps = [ld_snp_hash[rsID] for rsID in ld_snp_results]
# Compute ld_matrix
ld_snp_ids, ld_matrix = postgap.LD.get_pairwise_ld(
found_ld_snps, population)
# Update list of LD SNPs
ld_snps = [ld_snp_hash[rsID] for rsID in ld_snp_ids]
z_scores = [z_score_from_pvalue(ld_snp_results[rsID][0], ld_snp_results[rsID][1]) for rsID in ld_snp_ids]
betas = [ld_snp_results[rsID][1] for rsID in ld_snp_ids]
assert len(ld_snps) == ld_matrix.shape[0]
assert len(ld_snps) == ld_matrix.shape[1]
return ld_snps, ld_matrix, z_scores, betas
def impute_z_scores(cluster, population):
'''
Imputes Z-scores from available data, computes LD matrix
Arg1: Cluster
Returntype: [SNP], numpy.matrix (square LD matrix), numpy.matrix (Z-score vector)
'''
# Compute ld_matrix
ld_snp_ids, ld_matrix = postgap.LD.get_pairwise_ld(cluster.ld_snps, population)
# Update list of LD SNPs
ld_snp_hash = dict((ld_snp.rsID, ld_snp) for index, ld_snp in enumerate(cluster.ld_snps))
ld_snps = [ld_snp_hash[rsID] for rsID in ld_snp_ids]
# Determine which SNPs are missing values
gwas_snp_hash = dict((gwas_snp.snp.rsID, gwas_snp) for gwas_snp in cluster.gwas_snps)
missing_indices = numpy.array([index for index, ld_snp in enumerate(ld_snps) if ld_snp.rsID not in gwas_snp_hash]).astype(int)
known_z_scores = numpy.array([gwas_snp_hash[ld_snp.rsID].z_score for ld_snp in ld_snps if ld_snp.rsID in gwas_snp_hash])
known_betas = numpy.array([gwas_snp_hash[ld_snp.rsID].beta for ld_snp in ld_snps if ld_snp.rsID in gwas_snp_hash])
# Generate LD matrix of known values
ld_matrix_known = numpy.delete(ld_matrix, missing_indices, axis=1)
ld_matrix_known = numpy.delete(ld_matrix_known, missing_indices, axis=0)
# Generate LD matrix of known SNPs to missing SNPs
ld_matrix_k2m = ld_matrix[missing_indices, :]
ld_matrix_k2m = numpy.delete(ld_matrix_k2m, missing_indices, axis=1)
# Imputation
shrink_lambda = 0.1 # shrinkage factor, magic number
ld_matrix_known_shrink = shrink_lambda * numpy.diag(numpy.ones(ld_matrix_known.shape[0])) + (1-shrink_lambda) * ld_matrix_known
ld_matrix_k2m_shrink = (1-shrink_lambda) * ld_matrix_k2m
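# Impute missing z-scores/betas as R_k2m . pinv(R_known_shrunk) . known values; the diagonal
# shrinkage (shrink_lambda) keeps the known LD matrix well-conditioned for the pseudo-inverse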
z_shrink_imputed = numpy.dot(numpy.dot(ld_matrix_k2m_shrink, numpy.linalg.pinv(ld_matrix_known_shrink, 0.0001)), known_z_scores)
beta_shrink_imputed = numpy.dot(numpy.dot(ld_matrix_k2m_shrink, numpy.linalg.pinv(ld_matrix_known_shrink, 0.0001)), known_betas)
# Aggregate z_scores into a single vector
z_scores = []
betas = []
for i in ld_snps:
z_scores.append(0)
betas.append(0)
for index, z_score, beta in zip(missing_indices, z_shrink_imputed, beta_shrink_imputed):
z_scores[index] = z_score
betas[index] = beta
for index, ld_snp in enumerate(ld_snps):
if ld_snp.rsID in gwas_snp_hash:
z_scores[index] = gwas_snp_hash[ld_snp.rsID].z_score
betas[index] = gwas_snp_hash[ld_snp.rsID].beta
assert len(ld_snps) == ld_matrix.shape[0]
assert len(ld_snps) == ld_matrix.shape[1]
return ld_snps, ld_matrix, z_scores, betas
def finemap_gwas_cluster(cluster):
'''
Enriches GWAS clusters with z-scores and GWAS posteriors
Arg1: GWAS_Cluster
Returntype: GWAS_Cluster
'''
logging.info("Finemap GWAS Cluster")
# Define experiment label (serves for debugging logs)
chrom = cluster.ld_snps[0].chrom
start = min(ld_snp.pos for ld_snp in cluster.ld_snps)
end = max(ld_snp.pos for ld_snp in cluster.ld_snps)
sample_label = 'GWAS_Cluster_%s:%i-%i' % (chrom, start, end)
# Define LD SNP labels (serves for debugging logs)
ld_snp_ids = [ld_snp.rsID for ld_snp in cluster.ld_snps]
# Define sample size: mean of max for each SNP
sample_sizes = map(lambda gwas_snp: max(
gwas_association.sample_size for gwas_association in gwas_snp.evidence), cluster.gwas_snps)
sample_size = sum(sample_sizes) / len(sample_sizes)
# Compute posterior
if postgap.Globals.TYPE == 'binom' or postgap.Globals.TYPE == 'ML':
configuration_posteriors = postgap.Finemap.finemap_v1(
z_scores=numpy.array(cluster.z_scores),
beta_scores=numpy.array(cluster.betas),
cov_matrix=cluster.ld_matrix,
n=sample_size,
labels=ld_snp_ids,
sample_label=sample_label,
lambdas=cluster.lambdas,
mafs=cluster.mafs,
annotations=cluster.annotations,
kmax=postgap.Globals.KMAX,
isGWAS=True
)
elif postgap.Globals.TYPE == 'EM' or postgap.Globals.TYPE == 'ML_EM':
configuration_posteriors = postgap.Finemap.finemap_v2(
z_scores=numpy.array(cluster.z_scores),
beta_scores=numpy.array(cluster.betas),
cov_matrix=cluster.ld_matrix,
n=sample_size,
labels=ld_snp_ids,
sample_label=sample_label,
lambdas=cluster.lambdas,
mafs=cluster.mafs,
annotations=cluster.annotations,
kmax=postgap.Globals.KMAX,
isGWAS=True
)
return GWAS_Cluster(cluster.gwas_snps, cluster.ld_snps, cluster.ld_matrix, cluster.z_scores, cluster.betas, cluster.mafs, cluster.annotations, configuration_posteriors, cluster.lambdas)
def compute_joint_posterior(cluster, gene_tissue_snp_eQTL_hash):
"""
Compute colocalization posterior of gene expression and GWAS phenotype at the specified cluster and tissue
Arg1: GWAS_Cluster
Arg2: Hash of hashes: Gene => Tissue => SNP => (Float (z-score), Float (beta))
Returntype: Hash of hashes: Gene => Tissue => (rsID|_CLUSTER) => float
"""
return dict((gene, compute_gene_joint_posterior(cluster, gene, gene_tissue_snp_eQTL_hash[gene])) for gene in gene_tissue_snp_eQTL_hash)
def compute_gene_joint_posterior(cluster, gene, tissue_snp_eQTL_hash):
"""
Compute colocalization posterior of gene expression and GWAS phenotype at the specified cluster and tissue
Arg1: GWAS_Cluster
Arg2: Gene
Arg3: Hash of hashes: Tissue => SNP => (Float, Float)
Returntype: Hash of hashes: Tissue => (rsID|_CLUSTER) => float
"""
assert len(cluster.ld_snps) == cluster.ld_matrix.shape[0], (len(cluster.ld_snps), cluster.ld_matrix.shape[0], cluster.ld_matrix.shape[1])
assert len(cluster.ld_snps) == cluster.ld_matrix.shape[1], (len(cluster.ld_snps), cluster.ld_matrix.shape[0], cluster.ld_matrix.shape[1])
return dict((tissue, compute_gene_tissue_joint_posterior(cluster, gene, tissue, tissue_snp_eQTL_hash[tissue])) for tissue in tissue_snp_eQTL_hash)
def compute_gene_tissue_joint_posterior(cluster, gene, tissue, eQTL_snp_hash):
"""
Compute posterior of gene expression regulation at the specified cluster and tissue
Arg1: GWAS_Cluster
Arg2: Gene
Arg3: Tissue (string)
Arg4: Hash string (rsID) => (Float (z-score), Float (beta))
Returntype: (rsID|_CLUSTER) => float
"""
# eQTL posteriors
eQTL_configuration_posteriors = compute_eqtl_posteriors(cluster, tissue, gene, eQTL_snp_hash)
## Joint posterior
sum_posteriors, config_sample = eQTL_configuration_posteriors.joint_posterior(cluster.gwas_configuration_posteriors)
# Organise information into a hash
res = dict((config, config_sample.posterior[config_sample.configurations[config]]) for config in config_sample.configurations)
res['_CLUSTER'] = sum_posteriors
return res
def compute_eqtl_posteriors(cluster, tissue, gene, eQTL_snp_hash):
"""
Compute posterior of gene expression regulation at the specified cluster and tissue
Arg1: GWAS_Cluster
Arg2: Tissue (string)
Arg3: Gene
Arg4: Hash string (rsID) => (Float (z-score), Float (beta))
Returntype: OneDConfigurationSample
"""
assert len(cluster.ld_snps) == cluster.ld_matrix.shape[0], (len(cluster.ld_snps), cluster.ld_matrix.shape[0], cluster.ld_matrix.shape[1])
assert len(cluster.ld_snps) == cluster.ld_matrix.shape[1], (len(cluster.ld_snps), cluster.ld_matrix.shape[0], cluster.ld_matrix.shape[1])
# Determine which SNPs are missing values
missing_indices = numpy.array([index for index, ld_snp in enumerate(cluster.ld_snps) if ld_snp.rsID not in eQTL_snp_hash]).astype(int)
known_z_scores = numpy.array([eQTL_snp_hash[ld_snp.rsID][0] for ld_snp in cluster.ld_snps if ld_snp.rsID in eQTL_snp_hash])
known_betas = numpy.array([eQTL_snp_hash[ld_snp.rsID][1] for ld_snp in cluster.ld_snps if ld_snp.rsID in eQTL_snp_hash])
assert all(beta is not None for beta in known_betas)
assert len(known_z_scores) > 0
assert len(missing_indices) != len(cluster.ld_snps), (missing_indices, known_z_scores)
assert len(cluster.ld_snps) == cluster.ld_matrix.shape[0], (len(cluster.ld_snps), cluster.ld_matrix.shape[0], cluster.ld_matrix.shape[1])
assert len(cluster.ld_snps) == cluster.ld_matrix.shape[1], (len(cluster.ld_snps), cluster.ld_matrix.shape[0], cluster.ld_matrix.shape[1])
if len(missing_indices) > 0:
# Generate LD matrix of known values
ld_matrix_known = numpy.delete(cluster.ld_matrix, missing_indices, axis=1)
ld_matrix_known = numpy.delete(ld_matrix_known, missing_indices, axis=0)
assert ld_matrix_known.size > 0, (missing_indices, cluster.ld_matrix, ld_matrix_known)
# Generate LD matrix of known SNPs to missing SNPs
ld_matrix_k2m = cluster.ld_matrix[missing_indices, :]
ld_matrix_k2m = numpy.delete(ld_matrix_k2m, missing_indices, axis=1)
# Imputation
shrink_lambda = 0.1 # shrinkage factor, magic number
ld_matrix_known_shrink = shrink_lambda * numpy.diag(numpy.ones(ld_matrix_known.shape[0])) + (1-shrink_lambda) * ld_matrix_known
assert ld_matrix_known_shrink.size > 0, (missing_indices, cluster.ld_matrix, ld_matrix_known)
ld_matrix_k2m_shrink = (1-shrink_lambda) * ld_matrix_k2m
z_shrink_imputed = numpy.dot(numpy.dot(ld_matrix_k2m_shrink, numpy.linalg.pinv(ld_matrix_known_shrink, 0.0001)), known_z_scores)
beta_shrink_imputed = numpy.dot(numpy.dot(ld_matrix_k2m_shrink, numpy.linalg.pinv(ld_matrix_known_shrink, 0.0001)), known_betas)
# Aggregate z_scores into a single vector
z_scores = []
betas = []
for i in cluster.ld_snps:
z_scores.append(0)
betas.append(0)
for index, z_score, beta in zip(missing_indices, z_shrink_imputed, beta_shrink_imputed):
z_scores[index] = z_score
betas[index] = beta
for index, ld_snp in enumerate(cluster.ld_snps):
if ld_snp.rsID in eQTL_snp_hash:
z_scores[index] = eQTL_snp_hash[ld_snp.rsID][0]
betas[index] = eQTL_snp_hash[ld_snp.rsID][1]
else:
z_scores = known_z_scores
betas = known_betas
# Define experiment label (serves for debugging logs)
chrom = cluster.ld_snps[0].chrom
start = min(ld_snp.pos for ld_snp in cluster.ld_snps)
end = max(ld_snp.pos for ld_snp in cluster.ld_snps)
sample_label = 'eQTL_Cluster_%s:%i-%i_%s' % (chrom, start, end, gene)
# Learn F.A parameters in eQTL
lambdas = postgap.Finemap.compute_eqtl_lambdas(cluster, numpy.array(z_scores), postgap.Globals.KMAX)
# Compute posterior
logging.debug("Finemap eQTL Cluster")
return postgap.Finemap.finemap_v1(
z_scores=numpy.array(z_scores),
beta_scores=numpy.array(betas),
cov_matrix=cluster.ld_matrix,
n=500, # TODO extract eQTL sample sizes
labels=[ld_snp.rsID for ld_snp in cluster.ld_snps],
sample_label=sample_label,
lambdas=lambdas,
mafs=cluster.mafs,
annotations=cluster.annotations,
kstart = postgap.Globals.KSTART,
kmax = postgap.Globals.KMAX,
isGWAS=False
)
def sign(number):
"""
Returns the sign of the number (-1, 0 or 1)
Arg1: float
Returntype: int
"""
if number > 0:
return 1
elif number < 0:
return -1
else:
return 0
def z_score_from_pvalue(p_value, direction):
"""
Estimates z-score from p-value and effect direction
Arg1: float
Arg2: float
Returntype: float
"""
global scipy
if scipy is None:
import scipy
import scipy.stats
if p_value == 0:
p_value = 4.2e-317
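# Two-sided p-value: |z| = -Phi^-1(p/2), signed by the direction of the effect (beta)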
return -scipy.stats.norm.ppf(p_value/2) * sign(direction)
|
|
#!/usr/bin/env python
# coding:utf-8
#
# kobuki.py
from serial import Serial
import numpy as np
import cv2
import time
import csv
import os
from datetime import datetime
from primesense import openni2#, niite2
from primesense import _openni2 as c_api
# Path to OpenNI redistribution OpenNI2.so
dist = '/usr/lib/'
class kobuki :
# initialize Kobuki. OpenCV and 3D camera
def __init__(self, dev_path) :
self.dev_path = dev_path
def initialize(self) :
#initialize OpenNI2
openni2.initialize(dist) #
if (openni2.is_initialized()):
print "openNI2 initialized"
else:
print "openNI2 not initialized"
## Register the device
self.dev = openni2.Device.open_any()
## Create the colour and depth streams
self.rgb_stream = self.dev.create_color_stream()
self.depth_stream = self.dev.create_depth_stream()
## Configure the depth_stream -- changes automatically based on bus speed
#print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
self.depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=320, resolutionY=240, fps=30))
## Check and configure the mirroring -- default is True
## Note: I enabled mirroring
# print 'Mirroring info1', depth_stream.get_mirroring_enabled()
#depth_stream.set_mirroring_enabled(False)
#rgb_stream.set_mirroring_enabled(False)
## Start the streams
self.rgb_stream.start()
self.depth_stream.start()
## Synchronize the streams
self.dev.set_depth_color_sync_enabled(True) # synchronize the streams
## IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
self.dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)
print("ASUS Xtion Pro Initialized")
#initialize Serial communication
self.serial = Serial(self.dev_path, 115200)
print("Serial connection to Kobuki initialized")
# Set save path here
self.save_path = "data"+str(time.time()).replace(".","_")+"/"
os.system("mkdir "+self.save_path)
#Initialize CSV file
self.csv_file = open(self.save_path+'data.csv', "wb")
self.csv_writer = csv.writer(self.csv_file, delimiter=' ', quotechar='|', quoting=csv.QUOTE_NONE)
# throttle and steering variables
self.thr = 0
self.steer = 0.5
self.image_count = 1
def get_rgb(self):
"""
Returns numpy 3L ndarray to represent the rgb image.
"""
self.bgr = np.fromstring(self.rgb_stream.read_frame().get_buffer_as_uint8(),dtype=np.uint8).reshape(240,320,3)
self.rgb = cv2.cvtColor(self.bgr,cv2.COLOR_BGR2RGB)
return self.rgb
def get_depth(self):
"""
Returns numpy ndarrays representing the raw and ranged depth images.
Outputs:
dmap := distance map in mm, 1L ndarray, dtype=uint16, min=0, max=2**12-1
d4d := depth for display, 3L ndarray, dtype=uint8, min=0, max=255
Note1:
fromstring is faster than asarray or frombuffer
Note2:
.reshape(120,160) #smaller image for faster response
OMAP/ARM default video configuration
.reshape(240,320) # Used to MATCH RGB Image (OMAP/ARM)
Requires .set_video_mode
"""
self.dmap = np.fromstring(self.depth_stream.read_frame().get_buffer_as_uint16(),dtype=np.uint16).reshape(240,320) # Works & It's FAST
self.d4d = np.uint8(self.dmap.astype(float) * 255 / (2**12 - 1)) # Correct the range. Depth images are 12 bits
self.d4d = 255 - cv2.cvtColor(self.d4d,cv2.COLOR_GRAY2RGB)
return self.dmap, self.d4d
# send a command to Kobuki
def send(self, commands) :
sub_payloads = commands
payload = []
for sub_payload in sub_payloads :
payload.append(sub_payload)
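# Frame layout: 0xAA 0x55 header, a payload-length byte, the payload bytes,
# and a single XOR checksum computed over length + payload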
header = [0xAA, 0x55]
body = [len(payload)] + payload
checksum = 0
for x in body:
checksum ^= x
packets = header+body+[checksum]
#print(packets)
self.serial.write(''.join(map(chr, packets)))
# destructor
# def __del__(self) :
def deinitialize(self) :
self.stop()
self.serial.close()
print("Serial connection to Kobuki closed")
## Release resources
self.csv_file.close()
print("CSV file closed")
cv2.destroyAllWindows()
print("OpenCV terminated")
#rgb = self.get_rgb()
#_,d4d = self.get_depth()
self.rgb_stream.stop()
self.depth_stream.stop()
openni2.unload()
print ("OpenNI Terminated")
# Prepare byte string for speed and turn radius of Kobuki
def base_control(self, speed, radius) :
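# Sub-payload: ID 0x01, length 0x04, then speed and radius each as little-endian 16-bit values (LSB first)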
speed_lsb = 0xff & speed
speed_msb = 0xff & (speed>>8)
radius_lsb = 0xff & radius
radius_msb = 0xff & (radius>>8)
return [0x01, 0x04, speed_lsb, speed_msb, radius_lsb, radius_msb]
# Stop the Kobuki
def stop(self):
self.send(self.base_control(0,0))
# Map throttle values to values input by Kobuki
'''
def drive(self, thr, steer) :
#map throttle (0-100) to speed value
if(thr>100) :
print("Throttle limit exceeded")
thr = 100
if(thr<0) :
print("Throttle value out of bounds, setting to zero")
thr = 0
speed = thr*3;
#map steering (0-100)
if(steer>100) :
print("Steering limit exceeded, setting to 100%")
radius = 100
if(steer<0) :
print("Steer value out of bounds, setting to zero")
steer = 0
radius = -300+steer*6
#print("throttle : ", thr, "steer : ", steer)
#drive robot
self.send(self.base_control(speed,radius))
'''
def drive(self, thr, steer) :
radius = 0
speed = 0
if (thr == 1) :
speed = 300
elif (thr == 0) :
speed = 0
else :
print("Wrong throttle value, setting to zero")
speed = 0
if (steer == 0.5) :
radius = 0
elif (steer == 0) :
radius = 300
elif (steer == 1) :
radius = -300
else :
print("Wrong steering values, setting to 0.5")
radius = 0
self.send(self.base_control(speed,radius))
# Save images+steering+throttle data
def save_data(self, steer, thr) :
# Get RGB + Depth images
rgb = self.get_rgb()
_,d4d = self.get_depth()
# Prepare strings for save paths
time_str = self.save_path+'{:06d}'.format(self.image_count)
rgb_str = time_str+"_rgb.jpg"
depth_str = time_str+"_depth.jpg"
# Write images
cv2.imwrite(rgb_str, rgb)
cv2.imwrite(depth_str, d4d)
#cv2.imshow('display', rgb)
# Write throttle+steer into CSV
self.csv_writer.writerow([rgb_str]+[depth_str]+[str(thr)]+[str(int(steer==0))]+[str(int(steer==0.5))]+[str(int(steer==1))])
self.image_count = self.image_count + 1
# Main run loop
def run(self) :
# Create window for robot control input
cv2.namedWindow('display', cv2.WINDOW_NORMAL)
# step values. Change to increase/decrease acceleration
#thr_step = 2
#steer_step = 1
# Start measuring time here
start_time = time.time()
# Main loop
while (True) :
char = '\0'
char = cv2.waitKey(10) & 255
if(char == 27) :
print("\tEscape key detected!")
break
elif char == ord('w'):
self.thr = 1
self.steer = 0.5
#if(self.thr>100) :
# self.thr = 100
elif char == ord('a') :
self.thr = 1
self.steer = 0
elif char == ord('d') :
self.thr = 1
self.steer = 1
elif char == ord('s') :
self.steer = 0.5
self.thr = 0
self.drive(self.thr,self.steer)
end_time = time.time()
if((end_time-start_time)>0.1) :
self.save_data(self.steer, self.thr)
start_time = start_time + 0.1
# Stop robot and exit
self.stop()
if __name__ == '__main__' :
#os.system("rm -r data/")
#os.system("mkdir data/")
kob = kobuki('/dev/ttyUSB0')
kob.initialize()
kob.run()
kob.deinitialize()
# make videos
#os.system("sh make_video.sh")
|
|
import pickle, datetime, os, shutil
import percolation as P
from twython import Twython
from twython import TwythonStreamer
import parsedatetime as pdt
parser=pdt.Calendar()
from dateutil.parser import parse
from maccess import tw2 as tw
c=P.utils.check
def publishSearch(fname,fpath,aname=None,scriptpath=None,created_at=None,tweets_info="a hashtag or a topic (probably)",donated_by="labMacambira.sf.net",latin=False,utf8_fix=True,acquired_through="Twitter search engine",umbrella_dir=None):
if not aname:
print(fname,aname)
aname=fname.split("/")[-1].split(".")[0]
if not aname.endswith("_tw"):
aname+="_tw"
PREFIX="https://raw.githubusercontent.com/OpenLinkedSocialData/{}master/".format(umbrella_dir)
tweets=[]
try:
tweets+=P.utils.pRead2( fname)[0]
except:
c("nao tem " +fname)
#try:
# tweets+=P.utils.pRead2(fname.replace(".pickle","_.pickle"))
#except:
# c("nao tem " +fname.replace(".pickle","_.pickle"))
fname__=fname.replace(".pickle","_.pickle")
if os.path.isfile(fname__):
tweets,fopen=P.utils.pRead3(fname__,tweets)
#tweets=[i for j in tweets for i in j][:10000*30]
#tweet_chuncks=[tweets[i:i+10000] for i in range(0,len(tweets),10000)]
#tweets=[i for j in tweets for i in j][:270]
#tweet_chuncks=[tweets[i:i+100] for i in range(0,len(tweets),100)]
# tweet_chuncks=[tweets[i:i+100] for i in range(0,len(tweets),100)]
ccount=0
fpath_="{}{}/".format(fpath,aname)
nnicks=0
nicks_=[]
nreplies=0
nretweets=0
nmsgs=0
dates1=[]
dates2=[]
#for chunck in tweet_chuncks:
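# Process tweets in chunks: triplify the current batch, write a numbered Translate file,
# then read the next batch from the streamed pickle (if any) via pRead3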
while tweets:
c("chunck {}".format(ccount))
tg=P.rdf.makeBasicGraph([["po","tw"],[P.rdf.ns.po,P.rdf.ns.tw]],"Twitter messages linked data, chuck {:05d}".format(ccount))
for tweet in tweets:
tid=tweet["id_str"]
imsg=P.rdf.IC([tg],P.rdf.ns.tw.Message,tid)
uris=[P.rdf.ns.tw.messageID]
data=[tid]
msg=tweet["text"]
uris+=[P.rdf.ns.tw.messageContent]
data+=[msg]
uris+=[P.rdf.ns.tw.retweetCount]
data+=[tweet["retweet_count"]]
uris+=[P.rdf.ns.tw.lang]
data+=[tweet["lang"]]
uris+=[P.rdf.ns.tw.sentAt]
date=parse(tweet["created_at"])
data+=[date]
if tweet["in_reply_to_user_id"]:
uris+=[P.rdf.ns.tw.inReplyToUID]
data+=[tweet["in_reply_to_user_id"]]
P.rdf.link([tg],imsg,None,uris,data)
if "retweeted_status" in tweet.keys():
tid2=tweet["retweeted_status"]["id_str"]
imsg2=P.rdf.IC([tg],P.rdf.ns.tw.Message,tid2)
uris=[P.rdf.ns.tw.messageID]
data=[tid2]
uris+=[P.rdf.ns.tw.precedingMessageContent]
data+=[tweet["retweeted_status"]["text"]]
P.rdf.link([tg],imsg,None,uris,data)
sid2=tweet["retweeted_status"]["user"]["screen_name"]
iuser2=P.rdf.IC([tg],P.rdf.ns.tw.Participant,sid2)
uris=[P.rdf.ns.tw.sid]
data=[sid2]
uris+=[P.rdf.ns.tw.uid]
data+=[tweet["retweeted_status"]["user"]["id_str"]]
data+=[tweet["retweeted_status"]["user"]["name"]]
uris+=[P.rdf.ns.tw.name]
P.rdf.link([tg],iuser2,None,uris,data)
sid=tweet["user"]["screen_name"]
iuser=P.rdf.IC([tg],P.rdf.ns.tw.Participant,sid)
uris=[P.rdf.ns.tw.sid]
data=[sid]
uris+=[P.rdf.ns.tw.uid]
data+=[tweet["user"]["id_str"]]
if tweet["user"]["location"]:
uris+=[P.rdf.ns.tw.uLocation]
data+=[tweet["user"]["location"]]
data+=[tweet["user"]["favourites_count"]]
uris+=[P.rdf.ns.tw.favouritesCount]
data+=[tweet["user"]["followers_count"]]
uris+=[P.rdf.ns.tw.followersCount]
data+=[tweet["user"]["friends_count"]]
uris+=[P.rdf.ns.tw.friendsCount]
if tweet["user"]["utc_offset"]:
data+=[tweet["user"]["utc_offset"]]
uris+=[P.rdf.ns.tw.utcOffset]
P.rdf.link([tg],iuser,None,uris,data)
uris=[P.rdf.ns.tw.author]
uris2=[iuser]
if "retweeted_status" in tweet.keys():
uris+=[P.rdf.ns.tw.retweetOf]
uris2+=[imsg2]
P.rdf.link_([tg],imsg,None,uris,uris2)
# link the message to users
# and users to each other?
# find the id of the message this one is a retweet of or a reply to
# build the uris correctly for the user and the reply-to
# find the hashtags and add them right away
P.rdf.writeAll(tg,aname+"Translate{:05d}".format(ccount),fpath_,False,False)
#if not os.path.isdir(fpath_+"base"):
# os.mkdir(fpath_+"base")
#P.utils.pDump(tweets,fpath_+"base/"+"{}{:04d}.pickle".format(aname,ccount))
ccount+=1
nnicks+=P.utils.countMe( tg[0],"tw:author")
nicks = P.utils.getAll2( tg[0],"tw:author")
nicks_+=[i.split("#")[-1] for i in nicks]
nreplies += P.utils.countMe(tg[0],"tw:inReplyToUID")
nretweets+=P.utils.countMe( tg[0],"tw:retweetOf")
nmsgs += P.utils.countMe( tg[0], "tw:messageContent")
dates=[i for i in tg[0].query(r"SELECT ?p WHERE {?s tw:sentAt ?p} ORDER BY ASC(?p)")]
dates1+=[dates[0][0].value]
dates2+=[dates[-1][0].value]
tweets=[]
if os.path.isfile(fname__):
tweets,fopen=P.utils.pRead3(None,tweets,fopen)
date1=min(dates1)
date2=max(dates2)
c("tudo em RDF")
#tg_=[tg[0]+tg2[0],tg[1]]
#fpath_="{}/{}/".format(fpath,aname)
#P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
if not os.path.isdir(fpath_+"scripts"):
os.mkdir(fpath_+"scripts")
shutil.copy(scriptpath,fpath_+"scripts/")
# shutil.copy(fname,fpath_+"base/")
#i=0
#for chunck in tweet_chuncks:
# P.utils.pDump(chunck,fpath_+"base/"+"{}{:04d}.pickle".format(aname,i))
# i+=1
# write a README
#dates=[i for i in tg_[0].query(r"SELECT ?p WHERE {?s tw:sentAt ?p} ORDER BY ASC(?p)")]
#date1=dates[0][0].value
#date2=dates[-1][0].value
#return tg_
#nicks=queryMe(tg_[0],"SELECT ?s ?o WHERE {?s irc:nick ?o}")
#nnicks=P.utils.countMe( tg_[0],"tw:author")
#nicks= P.utils.getAll2( tg_[0],"tw:author")
#nicks_=[i.split("#")[-1] for i in nicks]
#nreplies= P.utils.countMe(tg_[0],"tw:inReplyToUID")
#nretweets=P.utils.countMe( tg_[0],"tw:retweetOf")
#nmsgs= P.utils.countMe( tg_[0], "tw:messageContent")
tg2=P.rdf.makeBasicGraph([["po","tw"],[P.rdf.ns.po,P.rdf.ns.tw]],"Metadata for the snapshot of Twitter messages")
ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
aname,"Snapshot {}".format(aname))
repourl="https://github.com/OpenLinkedSocialData/{}tree/master/{}".format(umbrella_dir,aname)
metaurl="{}rdf/{}Meta.owl".format(PREFIX,aname)
mvals=[]
muris=[]
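# Collect one rdfFile/ttlFile pair per Translate chunk so the snapshot metadata points at every generated file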
for i in range(ccount):
translate_url="{}rdf/{}Translate{:05d}".format(PREFIX,aname,i)
muris+=[P.rdf.ns.po.rdfFile,
P.rdf.ns.po.ttlFile]
mvals+=[translate_url+".owl",
translate_url+".ttl"]
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.createdAt,
P.rdf.ns.po.triplifiedIn,
P.rdf.ns.po.donatedBy,
P.rdf.ns.po.availableAt,
# P.rdf.ns.po.originalFile,
P.rdf.ns.po.discorveryRDFFile,
P.rdf.ns.po.discoveryTTLFile,
P.rdf.ns.po.acquiredThrough,
P.rdf.ns.rdfs.comment,
P.rdf.ns.tw.nParticipants,
P.rdf.ns.tw.nMessages,
P.rdf.ns.tw.nReTweets,
P.rdf.ns.tw.nReplies,
]+muris,
[created_at,
datetime.datetime.now(),
donated_by,
repourl,
# "https://raw.githubusercontent.com/ttm/{}/master/base/".format(aname),
metaurl,
"{}rdf/{}Meta.ttl".format(PREFIX,aname),
acquired_through,
"The Twitter messages related to {}".format(tweets_info),
nnicks,
nmsgs,
nretweets,
nreplies
]+mvals)
P.rdf.writeAll(tg2,aname+"Meta",fpath_,1)
with open(fpath_+"README","w") as f:
f.write("""This repo delivers RDF data from the Twitter messages about {}
collected around {}, with messages from {} to {} and {} participants.
Total messages count {} of which {} are replies and {} are retweets
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Twitter in data/\n
Metadata for discovery is in file:
{}
All files should be available at the git repository:
{}
\n
""".format(
tweets_info,created_at,date1,date2,
nnicks,nmsgs,nreplies,nretweets,
repourl,metaurl))
return tg, tweets
class Twitter:
"""Simplified Twitter interface for Stability observance
# function to set authentication: __init__()
# function to set hashtag and other tweets selection criteria: searchTag()
# function to search tweets: searchTag()
# function to stream tweets: void
"""
TWITTER_API_KEY = tw.tak
TWITTER_API_KEY_SECRET = tw.taks
TWITTER_ACCESS_TOKEN = tw.tat
TWITTER_ACCESS_TOKEN_SECRET = tw.tats
def __init__(self,app_key= None,
app_secret= None,
oauth_token= None,
oauth_token_secret=None,):
"""Start twitter seach and stream interface"""
if not app_key:
self.app_key= self.TWITTER_API_KEY
self.app_secret= self.TWITTER_API_KEY_SECRET
self.oauth_token= self.TWITTER_ACCESS_TOKEN
self.oauth_token_secret=self.TWITTER_ACCESS_TOKEN_SECRET
else:
self.app_key= app_key
self.app_secret= app_secret
self.oauth_token= oauth_token
self.oauth_token_secret=oauth_token_secret
def streamTag(self,HTAG="#python",aname=None):
if not aname:
aname=HTAG[1:]+"_tw"
stream=MyStreamer(self.app_key ,
self.app_secret ,
self.oauth_token ,
self.oauth_token_secret)
stream.putName(aname)
self.stream=stream
stream.statuses.filter(track=HTAG)
def finishStream(self):
self.stream.D.close()
def searchTag(self,HTAG="#python"):
"""Set Twitter search or stream criteria for the selection of tweets"""
self.t = Twython(app_key =self.app_key ,
app_secret =self.app_secret ,
oauth_token =self.oauth_token ,
oauth_token_secret =self.oauth_token_secret)
search = self.t.search(q=HTAG,count=100,result_type="recent")
ss = search['statuses'][:]
search = self.t.search(q=HTAG,count=150,max_id=ss[-1]['id']-1,result_type="recent")
#search = t.search(q=HTAG,count=150,since_id=ss[-1]['id'],result_type="recent")
while search['statuses']:
ss += search['statuses']
search = self.t.search(q=HTAG,count=150,max_id=ss[-1]['id']-1,result_type="recent")
self.ss = ss
#search = t.search(q=HTAG,count=150,since_id=ss[-1]['id'],result_type="recent")
# function to set authentication: __init__()
# function to set hashtag and other tweets selection criteria: searchTag()
# function to search tweets: searchTag()
# function to stream tweets: streamTag()
class MyStreamer(TwythonStreamer):
C=[]
i=1
def putName(self,aname):
fname="../data/tw/{}_.pickle".format(aname)
#if os.path.isfile(fname):
# self.C=P.utils.pRead(fname)
self.aname=aname
self.fname=fname
self.D=P.utils.Dumper(fname)
def on_success(self, data):
if 'text' in data:
self.C.append(data)
print(data['text'])
if self.i%100==0:
self.D.dump(self.C)
self.C=[]
print(self.i); self.i+=1
def on_error(self, status_code, data):
print(status_code)
#print "iniciando streaming"
#stream=MyStreamer(tw.tak,tw.taks,tw.tat,tw.tats)
#stream.statuses.filter(track=HTAG)
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
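# The RPC method is chosen by the first command-line argument; each branch below
# prompts interactively for that call's parameters before issuing it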
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Altbit address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Altbit address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import unittest
import itertools
import numpy
import six
import chainer
from chainer import backend
from chainer import functions
from chainer import testing
from chainer.utils import conv
from chainer.utils import type_check
def xs_iter(dims):
return itertools.product(*[range(d) for d in dims])
def kxs_iter(x, outs, ksize, stride, pad):
return itertools.product(
*[range(max(0, -p + s * _x), min(-p + s * _x + k, out))
for (_x, out, k, s, p) in zip(x, outs, ksize, stride, pad)])
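# For an input location x, kxs_iter yields every output coordinate covered by its unpooling
# window (start s*x - p, span k), clipped to the output extent; expected_unpooling_nd
# scatters the input value into all of those positions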
def expected_unpooling_nd(x_data, outs, ksize, stride, pad):
N, c = x_data.shape[:2]
dims = x_data.shape[2:]
y_expected_shape = (N, c) + outs
y_expected = numpy.zeros(y_expected_shape, dtype=x_data.dtype)
for i in six.moves.range(N):
for _c in six.moves.range(c):
for x in xs_iter(dims):
x_idx = (i, _c) + x
for kx in kxs_iter(x, outs, ksize, stride, pad):
y_idx = (i, _c) + kx
y_expected[y_idx] += x_data[x_idx]
return y_expected
@testing.parameterize(*(testing.product({
'dims': [(5,), (2, 3, 4)],
'_ksize': [3],
'_stride': [3],
'_pad': [1],
'cover_all': [True],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}) + testing.product({
'dims': [(3, 2)],
'_ksize': [1, 2, 3],
'_stride': [1, 2, 3],
'_pad': [0, 1],
'cover_all': [True, False],
'dtype': [numpy.float32],
})))
@testing.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward',
'test_consistency_regression_forward',
'test_consistency_regression_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestUnpoolingND(testing.FunctionTestCase):
def setUp(self):
N = 2
c = 3
self.ndim = len(self.dims)
self.ksize = (self._ksize,) * self.ndim
self.stride = (self._stride,) * self.ndim
self.pad = (self._pad,) * self.ndim
self.x_shape = (N, c) + self.dims
self.outs = tuple(
conv.get_deconv_outsize(d, k, s, p, cover_all=self.cover_all)
for (d, k, s, p)
in zip(self.dims, self.ksize, self.stride, self.pad))
self.gy_shape = (N, c) + self.outs
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 2 ** -4, 'rtol': 2 ** -4}
self.check_backward_options = {'atol': 2 ** -4, 'rtol': 2 ** -4}
self.check_double_backward_options = {}
else:
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 3e-2}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
outs = self.gy_shape[2:]
y_expected = expected_unpooling_nd(
x, outs, self.ksize, self.stride, self.pad)
return y_expected,
def forward(self, inputs, device):
x, = inputs
y = functions.unpooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=self.cover_all)
return y,
def check_forward_consistency_regression(self, backend_config):
# Regression test against the two-dimensional unpooling layer.
inputs, = self.generate_inputs()
x = chainer.Variable(backend_config.get_array(inputs))
ksize = self.ksize
stride = self.stride
pad = self.pad
y_nd = functions.unpooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
y_2d = functions.unpooling_2d(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
testing.assert_allclose(
y_nd.array, y_2d.array, **self.check_forward_options)
def test_consistency_regression_forward(self, backend_config):
if len(self.dims) == 2:
self.check_forward_consistency_regression(backend_config)
def check_backward_consistency_regression(self, backend_config):
# Regression test against the two-dimensional unpooling layer.
x_data, = self.generate_inputs()
gy_data = numpy.random.uniform(-1, 1, self.gy_shape).astype(self.dtype)
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = backend.get_array_module(x_data)
# Backward computation for N-dimensional unpooling layer.
x_nd = chainer.Variable(xp.array(x_data))
y_nd = functions.unpooling_nd(
x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional unpooling layer.
x_2d = chainer.Variable(xp.array(x_data))
y_2d = functions.unpooling_2d(
x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
opt = self.check_backward_options
testing.assert_allclose(
x_nd.grad, x_2d.grad, atol=opt['atol'], rtol=opt['rtol'])
def test_consistency_regression_backward(self, backend_config):
ndim = len(self.dims)
if ndim == 2:
self.check_backward_consistency_regression(backend_config)
@testing.parameterize(*testing.product({
'outsize': [(10,), (10, 9), (10, 9, 8)],
'_ksize': [1, 2, 3],
'_stride': [1, 2, 3],
'_pad': [0, 1],
'cover_all': [True, False],
}))
class TestUnpoolingNDOutsize(unittest.TestCase):
def setUp(self):
self.N = 2
self.c = 3
ndim = len(self.outsize)
self.ksize = (self._ksize,) * ndim
self.stride = (self._stride,) * ndim
self.pad = (self._pad,) * ndim
def test_valid_insize(self):
N = self.N
c = self.c
ksize = self.ksize
stride = self.stride
pad = self.pad
outs = self.outsize
cover_all = self.cover_all
# Make input.
dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
for (out, k, s, p) in zip(outs, ksize, stride, pad))
x_shape = (N, c) + dims
x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
x = chainer.Variable(x_data)
# Compute unpooling.
y = functions.unpooling_nd(
x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
# Test output's value.
y_expected = expected_unpooling_nd(x_data, outs, ksize, stride, pad)
testing.assert_allclose(y_expected, y.data)
def test_invalid_insize(self):
ksize = self.ksize
stride = self.stride
pad = self.pad
outs = self.outsize
cover_all = self.cover_all
# Make input with invalid shape.
dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
for (out, k, s, p) in zip(outs, ksize, stride, pad))
dims = tuple(d + 1 for d in dims) # Make invalid input shape.
x_shape = (self.N, self.c) + dims
x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
x = chainer.Variable(x_data)
# Computing unpooling raises exception.
with self.assertRaises(type_check.InvalidType):
functions.unpooling_nd(
x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
class TestUnpoolingNDWrappers(unittest.TestCase):
def _get_data(self, ndim):
x_shape = (2, 3) + (3,) * ndim
dtype = numpy.float32
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
ksize = (2,) * ndim
return x, ksize
def test_unpooling_1d(self):
(x, ksize) = self._get_data(1)
testing.assert_allclose(
functions.unpooling_nd(x, ksize).data,
functions.unpooling_1d(x, ksize).data)
def test_unpooling_1d_invalid(self):
(x, ksize) = self._get_data(2)
with self.assertRaises(ValueError):
functions.unpooling_1d(x, ksize)
def test_unpooling_3d(self):
(x, ksize) = self._get_data(3)
testing.assert_allclose(
functions.unpooling_nd(x, ksize).data,
functions.unpooling_3d(x, ksize).data)
def test_unpooling_3d_invalid(self):
(x, ksize) = self._get_data(2)
with self.assertRaises(ValueError):
functions.unpooling_3d(x, ksize)
testing.run_module(__name__, __file__)
|
|
import os
from datetime import datetime, timedelta
from focus import errors
from focus.task import Task
from focus.plugin import registration
from focus_unittest import FocusTestCase, MockPlugin
_ACTIVE_FILE_DATA = """active_task {
name "test";
start_time "2012-04-23 15:18:22.000000";
}
"""
_TASK_FILE_DATA = """task {
test_opt 12345;
test_block {
test_opt name;
test_opt "name 2";
}
}
"""
class TestTask(FocusTestCase):
def _get_pidfile(self):
return os.path.join(self.task.base_dir, '.focusd.pid')
def setUp(self):
super(TestTask, self).setUp()
self.setup_dir()
self.task = Task(base_dir=self.test_dir)
base_dir = self.task._paths['base_dir']
self.task._paths['task_dir'] = os.path.join(base_dir, 'tasks', 'test')
os.makedirs(self.task._paths['task_dir'])
# make task config
self.task_cfg = os.path.join(self.task.task_dir, 'task.cfg')
open(self.task_cfg, 'w', 0).write(_TASK_FILE_DATA)
# register some options from the test task config to a mock plugin.
for k in ('test_opt', 'test_block_test_opt'):
registration.register('option', k, MockPlugin, {})
def tearDown(self):
self.task = None
registration._option_hooks.clear()
registration._registered.clear()
super(TestTask, self).tearDown()
def test___reset(self):
""" Task._reset: correct class attributes are reset.
"""
self.task._name = 'test'
self.task._start_time = 'AAAA'
self.task._owner = 999
self.task._paths['task_dir'] = 'AAA'
self.task._paths['task_config'] = 'AAA'
self.task._loaded = True
self.task._reset()
self.assertIsNone(self.task._name)
self.assertIsNone(self.task._start_time)
self.assertEqual(self.task._owner, os.getuid())
self.assertIsNone(self.task._paths['task_dir'])
self.assertIsNone(self.task._paths['task_config'])
self.assertIsNotNone(self.task._loaded)
self.assertIsNotNone(self.task._paths['base_dir'])
self.assertIsNotNone(self.task._paths['active_file'])
def test___save_active_file(self):
""" Task._save_active_file: saves active file properly.
"""
self.task._name = 'test'
self.task._start_time = datetime(2012, 04, 23, 15, 18, 22)
self.task._owner = 1000
self.task._save_active_file()
self.assertEqual(open(self.task._paths['active_file'], 'r').read(),
_ACTIVE_FILE_DATA)
def testPidFileExistValid___clean_prior(self):
""" task._clean_prior: pid file exists and is valid, remove file.
"""
# write example pid file
filename = self._get_pidfile()
open(filename, 'w', 0).write('999999\n')
self.task._loaded = True
self.assertTrue(self.task._clean_prior())
self.assertFalse(os.path.isfile(filename)) # was removed
def testNoPidFile___clean_prior(self):
""" task._clean_prior: no pid file exists, do nothing.
"""
self.task._loaded = True
self.assertFalse(self.task._clean_prior())
def testPidFileExistsInvalid___clean_prior(self):
""" task._clean_prior: invalid pid file exists, do nothing.
"""
# write invalid pid file
filename = self._get_pidfile()
open(filename, 'w', 0).write('a#*)#&@!(b\n')
self.task._loaded = True
self.assertTrue(self.task._clean_prior())
self.assertTrue(os.path.isfile(filename)) # didn't remove
def test___clean(self):
""" Task._clean: active file is removed.
"""
open(self.task._paths['active_file'], 'w', 0).write('')
self.task._clean()
self.assertFalse(os.path.isfile(self.task._paths['active_file']))
def testValidActiveFile__load(self):
""" Task.load: loads a task if the active file is available.
"""
open(self.task._paths['active_file'], 'w', 0).write(_ACTIVE_FILE_DATA)
self.task.load()
self.assertEqual(self.task._name, 'test')
dt = datetime(2012, 04, 23, 15, 18, 22)
self.assertEqual(self.task._start_time, dt)
self.assertEqual(self.task._owner, os.getuid())
def testInvalidActiveFile__load(self):
""" Task.load: will not load a task if the active file is missing or
invalid.
"""
self.task.load()
self.assertIsNone(self.task._name)
open(self.task._paths['active_file'], 'w', 0).write('INVALID FILE')
self.task.load()
self.assertIsNone(self.task._name)
data = _ACTIVE_FILE_DATA[:len(_ACTIVE_FILE_DATA) / 2]
open(self.task._paths['active_file'], 'w', 0).write(data)
self.task.load()
self.assertIsNone(self.task._name)
# removes active file if it was invalid
self.assertFalse(os.path.isfile(self.task._paths['active_file']))
def testTaskValid__start(self):
""" Task.start: starts a task if task exists and is valid.
"""
self.assertTrue(self.task.start('test'))
self.assertTrue(os.path.isfile(self.task._paths['active_file']))
self.assertEqual(self.task._name, 'test')
def testTaskNonExist__start(self):
""" Task.start: fails if task doesn't exist.
"""
with self.assertRaises(errors.TaskNotFound):
self.task.start('non-exist')
def testTaskActive__start(self):
""" Task.start: fails if task is loaded.
"""
self.task._loaded = True
self.task._name = 'test'
with self.assertRaises(errors.ActiveTask):
self.task.start('test')
def testTaskInvalidTaskConfig__start(self):
""" Task.start: fails if task config for specified task is invalid.
"""
data = _TASK_FILE_DATA
open(self.task_cfg, 'w', 0).write('INVALID FILE')
with self.assertRaises(errors.InvalidTaskConfig):
self.task.start('test')
open(self.task_cfg, 'w', 0).write(data.replace('task {', 'invalid {'))
with self.assertRaises(errors.InvalidTaskConfig):
self.task.start('test')
open(self.task_cfg, 'w', 0).write(data.replace('{', '#'))
with self.assertRaises(errors.InvalidTaskConfig):
self.task.start('test')
def testNoActive__stop(self):
""" Task.stop: fails if no task active.
"""
self.task._loaded = False
with self.assertRaises(errors.NoActiveTask):
self.task.stop()
def test__exists(self):
""" Task.exists: returns correct value for task existence.
"""
self.assertTrue(self.task.exists('test'))
self.assertFalse(self.task.exists('non-exist'))
def test__get_config_path(self):
""" Task.get_config_path: returns correct task config path.
"""
self.assertEqual(self.task.get_config_path('test'),
os.path.join(self.task.base_dir, 'tasks', 'test',
'task.cfg'))
def testTaskExists__create(self):
""" Task.create: fails to create task.
"""
with self.assertRaises(errors.TaskExists):
self.task.create('test')
def testNewNoClone__create(self):
""" Task.create: creates new task.
"""
# create default task file
self.task._default_task_config = self.make_file(_TASK_FILE_DATA)
self.task.create('new_task')
task_dir = os.path.join(self.task.base_dir, 'tasks', 'new_task')
task_cfg = os.path.join(task_dir, 'task.cfg')
self.assertTrue(os.path.isdir(task_dir))
self.assertTrue(os.path.isfile(task_cfg))
# confirm default task file was used as template
with open(task_cfg, 'r') as file_:
self.assertEqual(file_.read(), _TASK_FILE_DATA)
def testNewFromClone__create(self):
""" Task.create: creates new task from existing task.
"""
self.task.create('new_task2', 'test')
task_dir = os.path.join(self.task.base_dir, 'tasks', 'new_task2')
self.assertTrue(os.path.isdir(task_dir))
self.assertTrue(os.path.isfile(os.path.join(task_dir, 'task.cfg')))
def testInvalidName__create(self):
""" Task.create: fails if invalid name provided.
"""
with self.assertRaises(ValueError):
self.task.create(None)
with self.assertRaises(ValueError):
self.task.create('-sup')
def test__rename(self):
""" Task.rename: reanmes task folder.
"""
# test non-existent old task path
with self.assertRaises(errors.TaskNotFound):
self.task.rename('non-exist', 'other')
# test existing new task path
test_path = os.path.join(self.task.base_dir, 'tasks', 'new-test')
os.makedirs(test_path)
with self.assertRaises(errors.TaskExists):
self.task.rename('test', 'new-test')
# test same names
with self.assertRaises(ValueError):
self.task.rename('test', 'test')
# successful task rename
self.assertTrue(self.task.rename('test', 'test2'))
old_dir_path = os.path.join(self.task.base_dir, 'tasks', 'test')
new_dir_path = os.path.join(self.task.base_dir, 'tasks', 'test2')
self.assertFalse(os.path.exists(old_dir_path))
self.assertTrue(os.path.exists(new_dir_path))
def test__remove(self):
""" Task.remove: removes task folder.
"""
self.assertTrue(self.task.remove('test'))
dir_path = os.path.join(self.task.base_dir, 'tasks', 'test')
self.assertFalse(os.path.exists(dir_path))
self.assertFalse(self.task.remove('non-exist'))
def test__get_list_info(self):
""" Task.get_list_info: returns valid tasks and info.
"""
# list all
info = self.task.get_list_info()
self.assertEqual(info[0][0], 'test')
self.assertEqual(list(info[0][1]), [['test_opt', ['12345']]])
self.assertEqual(list(info[0][2]), [
['test_block', [
['test_opt', ['name']],
['test_opt', ['name 2']]
]]
])
# existing match
info = self.task.get_list_info('test')
self.assertEqual(info[0][0], 'test')
self.assertEqual(list(info[0][1]), [['test_opt', ['12345']]])
self.assertEqual(list(info[0][2]), [
['test_block', [
['test_opt', ['name']],
['test_opt', ['name 2']]
]]
])
# non-exist match
info = self.task.get_list_info('non-exist')
self.assertEqual(info, [])
def testActiveTask__stop(self):
""" Task.stop: removes active file for task.
"""
self.task._loaded = True
open(self.task._paths['active_file'], 'w', 0).write('')
self.task.stop()
self.assertFalse(os.path.isfile(self.task._paths['active_file']))
def test__set_total_duration(self):
""" Task.set_total_duration: Correctly sets total task duration.
"""
self.task.set_total_duration(15)
self.assertEqual(self.task._total_duration, 15)
def test__dunderStr(self):
""" Task.__str__: returns proper str version.
"""
self.assertEqual(str(self.task),
'Task (name=<No Name>, duration=<1m)')
self.task._name = 'Test'
self.assertEqual(str(self.task),
'Task (name=Test, duration=<1m)')
def test__dunderUnicode(self):
""" Task.__unicode__: returns proper unicode version.
"""
self.assertEqual(unicode(self.task),
u'Task (name=<No Name>, duration=<1m)')
self.task._name = 'Test'
self.assertEqual(unicode(self.task),
u'Task (name=Test, duration=<1m)')
def testNotLoaded__active(self):
""" Task.active (property): false when task not loaded.
"""
self.assertFalse(self.task.active)
def testNoActiveFile__active(self):
""" Task.active (property): false if active file not found.
"""
self.task._loaded = True
self.assertFalse(self.task.active) # no active file
def testLoadedActiveFile__active(self):
""" Task.active (property): true if active file exists and task loaded.
"""
self.task._loaded = True
open(self.task._paths['active_file'], 'w', 0).write('')
self.assertTrue(self.task.active)
def testNoName__name(self):
""" Task.name (property): no name correct value.
"""
self.assertEqual(self.task.name, u'<No Name>')
def testHasName__name(self):
""" Task.name (property): name correct value.
"""
self.task._name = u'bob'
self.assertEqual(self.task.name, u'bob')
def testDefaultOwner__owner(self):
""" Task.owner (property): default value.
"""
self.assertEqual(self.task.owner, os.getuid())
def testHasOwner__owner(self):
""" Task.owner (property): owner correct value.
"""
self.task._owner = 1000
self.assertEqual(self.task.owner, 1000)
def testTaskNotLoaded__duration(self):
""" Task.duration (property): task not loaded, returns 0.
"""
self.task._loaded = False
self.task._start_time = datetime.now() + timedelta(minutes=-15)
self.assertEqual(self.task.duration, 0)
def testTaskLoaded__duration(self):
""" Task.duration (property): task loaded, returns correct duration.
"""
self.task._loaded = True
self.task._start_time = datetime.now() + timedelta(minutes=-15)
self.assertEqual(self.task.duration, 15)
def test__elapsed(self):
""" Task.elapsed (property): returns correct elapsed status.
"""
self.task._loaded = True
# not elapsed
self.task._start_time = datetime.now() + timedelta(minutes=-15)
self.task._total_duration = 30
self.assertFalse(self.task.elapsed)
# elapsed
self.task._start_time = datetime.now() + timedelta(minutes=-15)
self.task._total_duration = 15
self.assertTrue(self.task.elapsed)
# elapsed, overrun
self.task._total_duration = 15
self.task._start_time = datetime.now() + timedelta(minutes=-25)
self.assertTrue(self.task.elapsed)
def test__base_dir(self):
""" Task.base_dir (property): returns correct base_dir.
"""
self.assertEqual(self.task.base_dir, self.test_dir)
def test__task_dir(self):
""" Task.task_dir (property): returns correct task_dir.
"""
path = os.path.join(self.test_dir, 'tasks', 'test')
self.assertEqual(self.task.task_dir, path)
|
|
"""
Module for fetching artifacts from Artifactory
"""
import http.client
import logging
import os
import urllib.request
import xml.etree.ElementTree as ET
from urllib.error import HTTPError, URLError
import salt.utils.files
import salt.utils.hashutils
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "artifactory"
def __virtual__():
"""
Only load if elementtree xml library is available.
"""
return True
def get_latest_snapshot(
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
use_literal_group_id=False,
):
"""
Gets latest snapshot of the given artifact
artifactory_url
URL of artifactory instance
repository
Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION: artifactory.get_latest_snapshot,"
" artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s,"
" target_dir=%s, classifier=%s)",
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
salt.utils.hashutils.base64_encodestring(
"{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
)
)
artifact_metadata = _get_artifact_metadata(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
headers=headers,
use_literal_group_id=use_literal_group_id,
)
version = artifact_metadata["latest_version"]
snapshot_url, file_name = _get_snapshot_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
packaging=packaging,
classifier=classifier,
headers=headers,
use_literal_group_id=use_literal_group_id,
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
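# Illustrative sketch (not part of the original module): from the Salt CLI this
# execution module function would typically be invoked roughly as below. The
# URL, repository, and coordinates are hypothetical placeholders.
#
#   salt '*' artifactory.get_latest_snapshot \
#       artifactory_url=http://artifactory.example.com/artifactory \
#       repository=libs-snapshots group_id=com.example artifact_id=myapp \
#       packaging=jar target_dir=/tmp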
def get_snapshot(
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
version,
snapshot_version=None,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
use_literal_group_id=False,
):
"""
Gets snapshot of the desired version of the artifact
artifactory_url
URL of artifactory instance
repository
Snapshot repository in artifactory to retrieve artifact from, for example: libs-snapshots
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-snapshot_version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION:"
" artifactory.get_snapshot(artifactory_url=%s, repository=%s, group_id=%s,"
" artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
version,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
salt.utils.hashutils.base64_encodestring(
"{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
)
)
snapshot_url, file_name = _get_snapshot_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
packaging=packaging,
snapshot_version=snapshot_version,
classifier=classifier,
headers=headers,
use_literal_group_id=use_literal_group_id,
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(snapshot_url, target_file, headers)
def get_latest_release(
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
use_literal_group_id=False,
):
"""
Gets the latest release of the artifact
artifactory_url
URL of artifactory instance
repository
Release repository in artifactory to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION:"
" artifactory.get_latest_release(artifactory_url=%s, repository=%s,"
" group_id=%s, artifact_id=%s, packaging=%s, target_dir=%s, classifier=%s)",
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
salt.utils.hashutils.base64_encodestring(
"{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
)
)
version = __find_latest_version(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
headers=headers,
)
release_url, file_name = _get_release_url(
repository,
group_id,
artifact_id,
packaging,
version,
artifactory_url,
classifier,
use_literal_group_id,
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
def get_release(
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
version,
target_dir="/tmp",
target_file=None,
classifier=None,
username=None,
password=None,
use_literal_group_id=False,
):
"""
Gets the specified release of the artifact
artifactory_url
URL of artifactory instance
repository
Release repository in artifactory to retrieve artifact from, for example: libs-releases
group_id
Group Id of the artifact
artifact_id
Artifact Id of the artifact
packaging
Packaging type (jar,war,ear,etc)
version
Version of the artifact
target_dir
Target directory to download artifact to (default: /tmp)
target_file
Target file to download artifact to (by default it is target_dir/artifact_id-version.packaging)
classifier
Artifact classifier name (ex: sources,javadoc,etc). Optional parameter.
username
Artifactory username. Optional parameter.
password
Artifactory password. Optional parameter.
"""
log.debug(
"======================== MODULE FUNCTION:"
" artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s,"
" artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)",
artifactory_url,
repository,
group_id,
artifact_id,
packaging,
version,
target_dir,
classifier,
)
headers = {}
if username and password:
headers["Authorization"] = "Basic {}".format(
salt.utils.hashutils.base64_encodestring(
"{}:{}".format(username.replace("\n", ""), password.replace("\n", ""))
)
)
release_url, file_name = _get_release_url(
repository,
group_id,
artifact_id,
packaging,
version,
artifactory_url,
classifier,
use_literal_group_id,
)
target_file = __resolve_target_file(file_name, target_dir, target_file)
return __save_artifact(release_url, target_file, headers)
def __resolve_target_file(file_name, target_dir, target_file=None):
if target_file is None:
target_file = os.path.join(target_dir, file_name)
return target_file
def _get_snapshot_url(
artifactory_url,
repository,
group_id,
artifact_id,
version,
packaging,
snapshot_version=None,
classifier=None,
headers=None,
use_literal_group_id=False,
):
if headers is None:
headers = {}
has_classifier = classifier is not None and classifier != ""
if snapshot_version is None:
try:
snapshot_version_metadata = _get_snapshot_version_metadata(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
headers=headers,
)
if (
not has_classifier
and packaging not in snapshot_version_metadata["snapshot_versions"]
):
error_message = """Cannot find requested packaging '{packaging}' in the snapshot version metadata.
artifactory_url: {artifactory_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
version: {version}""".format(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
packaging=packaging,
classifier=classifier,
version=version,
)
raise ArtifactoryError(error_message)
packaging_with_classifier = (
packaging if not has_classifier else packaging + ":" + classifier
)
if (
has_classifier
and packaging_with_classifier
not in snapshot_version_metadata["snapshot_versions"]
):
error_message = """Cannot find requested classifier '{classifier}' in the snapshot version metadata.
artifactory_url: {artifactory_url}
repository: {repository}
group_id: {group_id}
artifact_id: {artifact_id}
packaging: {packaging}
classifier: {classifier}
version: {version}""".format(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
packaging=packaging,
classifier=classifier,
version=version,
)
raise ArtifactoryError(error_message)
snapshot_version = snapshot_version_metadata["snapshot_versions"][
packaging_with_classifier
]
except CommandExecutionError as err:
            log.error(
                "Could not fetch maven-metadata.xml (%s). Assuming"
                " snapshot_version=%s.",
                err,
                version,
            )
snapshot_version = version
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
file_name = "{artifact_id}-{snapshot_version}{classifier}.{packaging}".format(
artifact_id=artifact_id,
snapshot_version=snapshot_version,
packaging=packaging,
classifier=__get_classifier_url(classifier),
)
snapshot_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
artifactory_url=artifactory_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
version=version,
file_name=file_name,
)
log.debug("snapshot_url=%s", snapshot_url)
return snapshot_url, file_name
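# Illustrative example (assumption, not from the original source): with
# artifactory_url=http://artifactory.example.com/artifactory,
# repository=libs-snapshots, group_id=com.example, artifact_id=myapp,
# version=1.0-SNAPSHOT and a resolved snapshot_version of
# 1.0-20230101.120000-1, the function above builds a URL of the form:
#   http://artifactory.example.com/artifactory/libs-snapshots/com/example/myapp/1.0-SNAPSHOT/myapp-1.0-20230101.120000-1.jar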
def _get_release_url(
repository,
group_id,
artifact_id,
packaging,
version,
artifactory_url,
classifier=None,
use_literal_group_id=False,
):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
file_name = "{artifact_id}-{version}{classifier}.{packaging}".format(
artifact_id=artifact_id,
version=version,
packaging=packaging,
classifier=__get_classifier_url(classifier),
)
release_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/{file_name}".format(
artifactory_url=artifactory_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
version=version,
file_name=file_name,
)
log.debug("release_url=%s", release_url)
return release_url, file_name
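# Illustrative example (assumption, not from the original source): for
# group_id=com.example, artifact_id=myapp, version=1.2.3, packaging=jar the
# release URL takes the form:
#   {artifactory_url}/{repository}/com/example/myapp/1.2.3/myapp-1.2.3.jar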
def _get_artifact_metadata_url(
artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False
):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
artifact_metadata_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/maven-metadata.xml".format(
artifactory_url=artifactory_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
)
log.debug("artifact_metadata_url=%s", artifact_metadata_url)
return artifact_metadata_url
def _get_artifact_metadata_xml(
artifactory_url,
repository,
group_id,
artifact_id,
headers,
use_literal_group_id=False,
):
artifact_metadata_url = _get_artifact_metadata_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
use_literal_group_id=use_literal_group_id,
)
try:
request = urllib.request.Request(artifact_metadata_url, None, headers)
artifact_metadata_xml = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
message = "Could not fetch data from url: {}. ERROR: {}".format(
artifact_metadata_url, err
)
raise CommandExecutionError(message)
log.debug("artifact_metadata_xml=%s", artifact_metadata_xml)
return artifact_metadata_xml
def _get_artifact_metadata(
artifactory_url,
repository,
group_id,
artifact_id,
headers,
use_literal_group_id=False,
):
metadata_xml = _get_artifact_metadata_xml(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
headers=headers,
use_literal_group_id=use_literal_group_id,
)
root = ET.fromstring(metadata_xml)
assert group_id == root.find("groupId").text
assert artifact_id == root.find("artifactId").text
latest_version = root.find("versioning").find("latest").text
return {"latest_version": latest_version}
# functions for handling snapshots
def _get_snapshot_version_metadata_url(
artifactory_url,
repository,
group_id,
artifact_id,
version,
use_literal_group_id=False,
):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
snapshot_version_metadata_url = "{artifactory_url}/{repository}/{group_url}/{artifact_id}/{version}/maven-metadata.xml".format(
artifactory_url=artifactory_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
version=version,
)
log.debug("snapshot_version_metadata_url=%s", snapshot_version_metadata_url)
return snapshot_version_metadata_url
def _get_snapshot_version_metadata_xml(
artifactory_url,
repository,
group_id,
artifact_id,
version,
headers,
use_literal_group_id=False,
):
snapshot_version_metadata_url = _get_snapshot_version_metadata_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
use_literal_group_id=use_literal_group_id,
)
try:
request = urllib.request.Request(snapshot_version_metadata_url, None, headers)
snapshot_version_metadata_xml = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
message = "Could not fetch data from url: {}. ERROR: {}".format(
snapshot_version_metadata_url, err
)
raise CommandExecutionError(message)
log.debug("snapshot_version_metadata_xml=%s", snapshot_version_metadata_xml)
return snapshot_version_metadata_xml
def _get_snapshot_version_metadata(
artifactory_url, repository, group_id, artifact_id, version, headers
):
metadata_xml = _get_snapshot_version_metadata_xml(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
version=version,
headers=headers,
)
metadata = ET.fromstring(metadata_xml)
assert group_id == metadata.find("groupId").text
assert artifact_id == metadata.find("artifactId").text
assert version == metadata.find("version").text
snapshot_versions = metadata.find("versioning").find("snapshotVersions")
extension_version_dict = {}
for snapshot_version in snapshot_versions:
extension = snapshot_version.find("extension").text
value = snapshot_version.find("value").text
if snapshot_version.find("classifier") is not None:
classifier = snapshot_version.find("classifier").text
extension_version_dict[extension + ":" + classifier] = value
else:
extension_version_dict[extension] = value
return {"snapshot_versions": extension_version_dict}
def __get_latest_version_url(
artifactory_url, repository, group_id, artifact_id, use_literal_group_id=False
):
group_url = __get_group_id_subpath(group_id, use_literal_group_id)
# for released versions the suffix for the file is same as version
latest_version_url = "{artifactory_url}/api/search/latestVersion?g={group_url}&a={artifact_id}&repos={repository}".format(
artifactory_url=artifactory_url,
repository=repository,
group_url=group_url,
artifact_id=artifact_id,
)
log.debug("latest_version_url=%s", latest_version_url)
return latest_version_url
def __find_latest_version(
artifactory_url,
repository,
group_id,
artifact_id,
headers,
use_literal_group_id=False,
):
latest_version_url = __get_latest_version_url(
artifactory_url=artifactory_url,
repository=repository,
group_id=group_id,
artifact_id=artifact_id,
use_literal_group_id=use_literal_group_id,
)
try:
request = urllib.request.Request(latest_version_url, None, headers)
version = urllib.request.urlopen(request).read()
except (HTTPError, URLError) as err:
message = "Could not fetch data from url: {}. ERROR: {}".format(
latest_version_url, err
)
raise CommandExecutionError(message)
log.debug("Response of: %s", version)
if version is None or version == "":
raise ArtifactoryError("Unable to find release version")
return version
def __save_artifact(artifact_url, target_file, headers):
log.debug("__save_artifact(%s, %s)", artifact_url, target_file)
result = {"status": False, "changes": {}, "comment": ""}
if os.path.isfile(target_file):
log.debug("File %s already exists, checking checksum...", target_file)
checksum_url = artifact_url + ".sha1"
checksum_success, artifact_sum, checksum_comment = __download(
checksum_url, headers
)
if checksum_success:
artifact_sum = salt.utils.stringutils.to_unicode(artifact_sum)
log.debug("Downloaded SHA1 SUM: %s", artifact_sum)
file_sum = __salt__["file.get_hash"](path=target_file, form="sha1")
log.debug("Target file (%s) SHA1 SUM: %s", target_file, file_sum)
if artifact_sum == file_sum:
result["status"] = True
result["target_file"] = target_file
result["comment"] = (
"File {} already exists, checksum matches with Artifactory.\n"
"Checksum URL: {}".format(target_file, checksum_url)
)
return result
else:
result["comment"] = (
"File {} already exists, checksum does not match with"
" Artifactory!\nChecksum URL: {}".format(target_file, checksum_url)
)
else:
result["status"] = False
result["comment"] = checksum_comment
return result
log.debug("Downloading: %s -> %s", artifact_url, target_file)
try:
request = urllib.request.Request(artifact_url, None, headers)
f = urllib.request.urlopen(request)
with salt.utils.files.fopen(target_file, "wb") as local_file:
local_file.write(salt.utils.stringutils.to_bytes(f.read()))
result["status"] = True
result["comment"] = __append_comment(
"Artifact downloaded from URL: {}".format(artifact_url),
result["comment"],
)
result["changes"]["downloaded_file"] = target_file
result["target_file"] = target_file
except (HTTPError, URLError) as e:
result["status"] = False
result["comment"] = __get_error_comment(e, artifact_url)
return result
def __get_group_id_subpath(group_id, use_literal_group_id=False):
if not use_literal_group_id:
group_url = group_id.replace(".", "/")
return group_url
return group_id
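# Illustration (not in the original source): __get_group_id_subpath turns
# "com.example.myapp" into "com/example/myapp" unless use_literal_group_id is
# True, in which case the group id is used verbatim in the URL path.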
def __get_classifier_url(classifier):
has_classifier = classifier is not None and classifier != ""
return "-" + classifier if has_classifier else ""
def __download(request_url, headers):
log.debug("Downloading content from %s", request_url)
success = False
content = None
comment = None
try:
request = urllib.request.Request(request_url, None, headers)
url = urllib.request.urlopen(request)
content = url.read()
success = True
except HTTPError as e:
comment = __get_error_comment(e, request_url)
return success, content, comment
def __get_error_comment(http_error, request_url):
if http_error.code == http.client.NOT_FOUND:
comment = "HTTP Error 404. Request URL: " + request_url
elif http_error.code == http.client.CONFLICT:
comment = (
"HTTP Error 409: Conflict. Requested URL: {}. \nThis error may be caused by"
" reading snapshot artifact from non-snapshot repository.".format(
request_url
)
)
else:
comment = "HTTP Error {err_code}. Request URL: {url}".format(
err_code=http_error.code, url=request_url
)
return comment
def __append_comment(new_comment, current_comment=""):
return current_comment + "\n" + new_comment
class ArtifactoryError(Exception):
def __init__(self, value):
super().__init__()
self.value = value
def __str__(self):
return repr(self.value)
|
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
from blinkbuild.name_style_converter import NameStyleConverter
from .composition_parts import WithDebugInfo
from .composition_parts import WithExtendedAttributes
from .composition_parts import WithIdentifier
from .extended_attribute import ExtendedAttributes
from .reference import RefById
from .reference import RefByIdFactory
from .typedef import Typedef
from .user_defined_type import UserDefinedType
# The implementation class hierarchy of IdlType
#
# IdlType
# + SimpleType
# + ReferenceType
# + DefinitionType
# + TypedefType
# + _ArrayLikeType
# | + SequenceType
# | + FrozenArrayType
# | + VariadicType
# + RecordType
# + PromiseType
# + UnionType
# + NullableType
_IDL_TYPE_PASS_KEY = object()
class IdlTypeFactory(object):
"""
Creates a group of instances of IdlType, over which you can iterate later.
There are two phases; instantiation phase and iteration phase. The factory
is initially in the instantiation phase and you can create instances of
IdlType. Once it enters to the iteration phase (through the first attempt
to iterate), you can no longer create a new instance.
"""
def __init__(self):
self._idl_types = []
# Factory to initialize instances of ReferenceType.
self._ref_by_id_factory = RefByIdFactory(
target_attrs_with_priority=RefById.get_all_attributes(IdlType))
# |_is_frozen| is initially False and you can create new instances of
# IdlType. The first invocation of |for_each| freezes the factory and
# you can no longer create a new instance of IdlType.
self._is_frozen = False
def for_each(self, callback):
"""
Applies |callback| to all the instances of IdlType created by this
factory.
Instantiation of IdlType is no longer possible.
Args:
callback: A callable that takes an IdlType as only the argument.
Return value is not used.
"""
assert callable(callback)
self._is_frozen = True
for idl_type in self._idl_types:
callback(idl_type)
def for_each_reference(self, callback):
"""
Applies |callback| to all the instances of IdlType that is referencing
to another IdlType.
Instantiation of referencing IdlType is no longer possible, but it's
still possible to instantiate other IdlTypes.
Args:
callback: A callable that takes an IdlType as only the argument.
Return value is not used.
"""
self._ref_by_id_factory.for_each(callback)
def simple_type(self, *args, **kwargs):
return self._create(SimpleType, args, kwargs)
def reference_type(self, *args, **kwargs):
assert 'ref_by_id_factory' not in kwargs
kwargs['ref_by_id_factory'] = self._ref_by_id_factory
return self._create(ReferenceType, args, kwargs)
def definition_type(self, *args, **kwargs):
return self._create(DefinitionType, args, kwargs)
def typedef_type(self, *args, **kwargs):
return self._create(TypedefType, args, kwargs)
def sequence_type(self, *args, **kwargs):
return self._create(SequenceType, args, kwargs)
def frozen_array_type(self, *args, **kwargs):
return self._create(FrozenArrayType, args, kwargs)
def variadic_type(self, *args, **kwargs):
return self._create(VariadicType, args, kwargs)
def record_type(self, *args, **kwargs):
return self._create(RecordType, args, kwargs)
def promise_type(self, *args, **kwargs):
return self._create(PromiseType, args, kwargs)
def union_type(self, *args, **kwargs):
return self._create(UnionType, args, kwargs)
def nullable_type(self, *args, **kwargs):
return self._create(NullableType, args, kwargs)
def _create(self, idl_type_concrete_class, args, kwargs):
assert not self._is_frozen
idl_type = idl_type_concrete_class(
*args, pass_key=_IDL_TYPE_PASS_KEY, **kwargs)
self._idl_types.append(idl_type)
return idl_type
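# Hedged usage sketch (not part of the original Chromium source); the names
# below are illustrative:
#
#   factory = IdlTypeFactory()
#   long_type = factory.simple_type('long')
#   nullable_long = factory.nullable_type(long_type)
#   factory.for_each(lambda idl_type: print(idl_type.syntactic_form))
#
# After the first for_each() call the factory is frozen, so a further
# factory.simple_type('double') would trip the assertion in _create().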
class IdlType(WithExtendedAttributes, WithDebugInfo):
"""
Represents a 'type' in Web IDL.
IdlType is an interface of types in Web IDL, and also provides all the
information that is necessary for type conversions. For example, given the
conversion rules of ECMAScript bindings, you can produce a type converter
between Blink types and V8 types with using an IdlType.
Note that IdlType is designed to _not_ include knowledge about a certain
language bindings (such as ECMAScript bindings), thus it's out of scope for
IdlType to tell whether IDL dictionary type accepts ES null value or not.
Nullable type and typedef are implemented as if they're a container type
like record type and promise type.
"""
class Optionality(object):
"""https://heycam.github.io/webidl/#dfn-optionality-value"""
class Type(str):
pass
REQUIRED = Type('required')
OPTIONAL = Type('optional')
VARIADIC = Type('variadic')
def __init__(self,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(is_optional, bool)
assert pass_key is _IDL_TYPE_PASS_KEY
WithExtendedAttributes.__init__(
self, extended_attributes, readonly=True)
WithDebugInfo.__init__(self, debug_info)
self._is_optional = is_optional
def __eq__(self, other):
"""Returns True if |self| and |other| represent the equivalent type."""
return (self.__class__ == other.__class__
and ExtendedAttributes.equals(self.extended_attributes,
other.extended_attributes)
and self.is_optional == other.is_optional)
def __ne__(self, other):
return not self == other
def __hash__(self):
raise NotImplementedError()
def make_copy(self, memo):
return self
@property
def syntactic_form(self):
"""
Returns a text representation of the type in the form of Web IDL syntax.
"""
raise NotImplementedError()
@property
def type_name(self):
"""
Returns the type name.
https://heycam.github.io/webidl/#dfn-type-name
Note that a type name is not necessarily unique.
"""
return '{}{}'.format(
self.type_name_without_extended_attributes, ''.join(
sorted(self.effective_annotations.keys())))
@property
def type_name_with_extended_attribute_key_values(self):
name_pieces = []
name_pieces.append(self.type_name_without_extended_attributes)
annotations = self.effective_annotations
for key in sorted(annotations.keys()):
name_pieces.append(key)
name_pieces.extend(annotations.values_of(key))
return ''.join(name_pieces)
@property
def type_name_without_extended_attributes(self):
raise NotImplementedError()
@property
def keyword_typename(self):
"""
Returns the keyword name of the type if this is a simple built-in type,
e.g. "any", "boolean", "unsigned long long", "void", etc. Otherwise,
returns None.
"""
return None
def apply_to_all_composing_elements(self, callback):
"""
Applies |callback| to all instances of IdlType of which this IdlType
consists, including |self|.
In case of x.apply_to_all_composing_elements(callback), |callback| will
be recursively called back on x, x.inner_type, x.element_type,
x.result_type.original_type, etc. if any.
If |callback| raises a StopIteration, then this function stops
traversing deeper than this type (inner type, etc.), however, siblings
are still traversed. E.g. For record<K, V>, raising a StopIteration at
K doesn't prevent from traversing V.
"""
try:
callback(self)
except StopIteration:
return
def unwrap(self, nullable=None, typedef=None, variadic=None):
"""
Returns the body part of the actual type, i.e. returns the interesting
part of this type.
Args:
nullable:
typedef:
variadic:
All these arguments take tri-state value: True, False, or None.
True unwraps that type, False stops unwrapping that type. All
of specified arguments' values must be consistent, and mixture
of True and False is not allowed. Unspecified arguments are
automatically set to the opposite value. If no argument is
specified, unwraps all types.
"""
switches = {
'nullable': nullable,
'typedef': typedef,
'variadic': variadic,
}
value_counts = {None: 0, False: 0, True: 0}
for value in switches.values():
assert value is None or isinstance(value, bool)
value_counts[value] += 1
assert value_counts[False] == 0 or value_counts[True] == 0, (
"Specify only True or False arguments. Unspecified arguments are "
"automatically set to the opposite value.")
default = value_counts[True] == 0
for arg, value in switches.items():
if value is None:
switches[arg] = default
return self._unwrap(switches)
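    # Illustration of the tri-state arguments above (not in the original
    # source): unwrap() removes every wrapper layer (nullable, typedef,
    # variadic); unwrap(typedef=False) keeps typedef wrappers but removes the
    # rest; unwrap(nullable=True) removes only nullable wrappers.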
@property
def effective_annotations(self):
"""
Returns the extended attributes associated with this IDL type.
https://heycam.github.io/webidl/#idl-type-extended-attribute-associated-with
For example, given the following IDL fragments,
typedef [ExtAttr1] long NewLong;
void f([ExtAttr2] NewLong arg);
arg.idl_type.extended_attributes returns [ExtAttr2],
arg.idl_type.unwrap().extended_attributes returns [ExtAttr1], and
arg.idl_type.effective_annotations returns [ExtAttr1, ExtAttr2].
"""
return self.extended_attributes
@property
def does_include_nullable_type(self):
"""
        Returns True if this type includes a nullable type.
https://heycam.github.io/webidl/#dfn-includes-a-nullable-type
"""
return False
@property
def does_include_nullable_or_dict(self):
"""
Returns True if this type includes a nullable type or a dictionary type.
IdlType's own definition of "includes a dictionary type" just follows
the definition of "includes a nullable type".
"""
return False
@property
def is_numeric(self):
"""
Returns True if this is an integer type or floating point number type.
"""
return False
@property
def is_integer(self):
"""Returns True if this is an integer type."""
return False
@property
def is_floating_point_numeric(self):
"""Returns True if this is a floating point numeric type."""
return False
@property
def is_boolean(self):
"""Returns True if this is boolean."""
return False
@property
def is_string(self):
"""
Returns True if this is one of DOMString, ByteString, or USVString.
"""
return False
@property
def is_buffer_source_type(self):
"""Returns True if this is a buffer source type."""
return False
@property
def is_array_buffer(self):
"""Returns True if this is ArrayBuffer."""
return False
@property
def is_array_buffer_view(self):
"""Returns True if this is ArrayBufferView."""
return False
@property
def is_data_view(self):
"""Returns True if this is DataView."""
return False
@property
def is_typed_array_type(self):
"""Returns True if this is a typed array type."""
return False
@property
def is_object(self):
"""
Returns True if this is exactly type 'object'.
Note that this method doesn't return True for an interface or dictionary
type, or type 'any'.
"""
return False
@property
def is_symbol(self):
"""Returns True if this is type 'symbol'."""
return False
@property
def is_any(self):
"""Returns True if this is type 'any'."""
return False
@property
def is_void(self):
"""Returns True if this is type 'void'."""
return False
@property
def is_interface(self):
"""Returns True if this is an interface type."""
return False
@property
def is_dictionary(self):
"""Returns True if this is a dictionary type."""
return False
@property
def is_enumeration(self):
"""Returns True if this is an enumeration type."""
return False
@property
def is_callback_interface(self):
"""Returns True if this is a callback interface type."""
return False
@property
def is_callback_function(self):
"""Returns True if this is a callback function type."""
return False
@property
def is_typedef(self):
"""
Returns True if this is a typedef.
Despite that 'typedef' in Web IDL is not a type, IdlType treats typedefs
as type-wrapping-type just like nullable type and promise type. You can
access the typedef'ed type through |original_type|.
"""
return False
@property
def is_sequence(self):
"""Returns True if this is a sequence type."""
return False
@property
def is_frozen_array(self):
"""Returns True if this is a froen array type."""
return False
@property
def is_record(self):
"""Returns True if this is a record type."""
return False
@property
def is_promise(self):
"""Returns True if this is a promise type."""
return False
@property
def is_union(self):
"""Returns True if this is a union type."""
return False
@property
def is_nullable(self):
"""
Returns True if this is a nullable type.
NOTE: If |self| is a union type which includes a nullable type, this
returns False, because |self| itself is not a nullable type. Use
|does_include_nullable_type| in such a case.
"""
return False
@property
def is_optional(self):
"""
Returns True if this type is used for a non-required dictionary member
or an optional argument.
"""
return self._is_optional
@property
def is_variadic(self):
"""
Returns True if this represents variadic arguments' type.
Variadic argument type is represented as a type-wrapping-type like
sequence type. You can access the type of each argument through
|element_type|.
"""
return False
@property
def optionality(self):
"""Returns the optionality value."""
if self.is_variadic:
return IdlType.Optionality.VARIADIC
if self.is_optional:
return IdlType.Optionality.OPTIONAL
return IdlType.Optionality.REQUIRED
@property
def original_type(self):
"""Returns the typedef'ed type."""
return None
@property
def element_type(self):
"""
Returns the element type if |is_sequence|, |is_frozen_array|, or
|is_variadic|.
"""
return None
@property
def key_type(self):
"""Returns the key type if |is_record|."""
return None
@property
def value_type(self):
"""Returns the value type if |is_record|."""
return None
@property
def result_type(self):
"""Returns the result type if |is_promise|."""
return None
@property
def member_types(self):
"""Returns member types if |is_union|."""
return None
@property
def flattened_member_types(self):
"""
Returns a set of flattened member types if |is_union|.
https://heycam.github.io/webidl/#dfn-flattened-union-member-types
        Note that this is not simple flattening: nullable types are unwrapped,
        annotated types are always unwrapped (but you can still access them
        through extended_attributes on the IdlType), and typedefs are
        unwrapped.
"""
return None
@property
def inner_type(self):
"""Returns the inner type of type IdlType if |is_nullable|."""
return None
@property
def type_definition_object(self):
"""
Returns an object that represents a spec-author-defined type or None.
Note that a returned object is not an IdlType. In case of interface,
a returned object is an instance of Interface.
"""
return None
@property
def union_definition_object(self):
"""
Returns an object that represents an union or None.
Note that a returned object is not an IdlType. It's of type Union.
"""
return None
def _format_syntactic_form(self, syntactic_form_inner):
"""Helper function to implement |syntactic_form|."""
optional_form = 'optional ' if self.is_optional else ''
ext_attr_form = ('{} '.format(self.extended_attributes.syntactic_form)
if self.extended_attributes else '')
return '{}{}{}'.format(optional_form, ext_attr_form,
syntactic_form_inner)
def _unwrap(self, switches):
return self
class SimpleType(IdlType):
"""
Represents built-in types that do not contain other types internally.
e.g. primitive types, string types, and object types.
https://heycam.github.io/webidl/#idl-types
"""
_INTEGER_TYPES = ('byte', 'octet', 'short', 'unsigned short', 'long',
'unsigned long', 'long long', 'unsigned long long')
_FLOATING_POINT_NUMERIC_TYPES = ('float', 'unrestricted float', 'double',
'unrestricted double')
_NUMERIC_TYPES = _FLOATING_POINT_NUMERIC_TYPES + _INTEGER_TYPES
_STRING_TYPES = ('DOMString', 'ByteString', 'USVString')
_TYPED_ARRAY_TYPES = ('Int8Array', 'Int16Array', 'Int32Array',
'Uint8Array', 'Uint16Array', 'Uint32Array',
'Uint8ClampedArray', 'Float32Array', 'Float64Array')
    # ArrayBufferView is not defined as a buffer source type in Web IDL; it's
    # defined as a union type of all typed array types. However, practically
    # it's much more convenient and reasonable for most (if not all) use
    # cases to treat ArrayBufferView as a buffer source type rather than as a
    # union type.
# https://heycam.github.io/webidl/#ArrayBufferView
#
    # Note that BufferSource is a union type as defined in Web IDL.
# https://heycam.github.io/webidl/#BufferSource
_BUFFER_SOURCE_TYPES = (
('ArrayBuffer', 'ArrayBufferView', 'DataView') + _TYPED_ARRAY_TYPES)
_MISC_TYPES = ('any', 'boolean', 'object', 'symbol', 'void')
_VALID_TYPES = set(_NUMERIC_TYPES + _STRING_TYPES + _BUFFER_SOURCE_TYPES +
_MISC_TYPES)
def __init__(self,
name,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert name in SimpleType._VALID_TYPES, (
'Unknown type name: {}'.format(name))
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
self._name = name
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.syntactic_form == other.syntactic_form)
def __hash__(self):
return hash(self._name)
@property
def syntactic_form(self):
return self._format_syntactic_form(self._name)
@property
def type_name_without_extended_attributes(self):
name = 'String' if self._name == 'DOMString' else self._name
return NameStyleConverter(name).to_upper_camel_case()
@property
def keyword_typename(self):
return self._name
@property
def is_numeric(self):
return self._name in SimpleType._NUMERIC_TYPES
@property
def is_integer(self):
return self._name in SimpleType._INTEGER_TYPES
@property
def is_floating_point_numeric(self):
return self._name in SimpleType._FLOATING_POINT_NUMERIC_TYPES
@property
def is_boolean(self):
return self._name == 'boolean'
@property
def is_string(self):
return self._name in SimpleType._STRING_TYPES
@property
def is_buffer_source_type(self):
return self._name in SimpleType._BUFFER_SOURCE_TYPES
@property
def is_array_buffer(self):
return self._name == 'ArrayBuffer'
@property
def is_array_buffer_view(self):
return self._name == 'ArrayBufferView'
@property
def is_data_view(self):
return self._name == 'DataView'
@property
def is_typed_array_type(self):
return self._name in SimpleType._TYPED_ARRAY_TYPES
@property
def is_object(self):
return self._name == 'object'
@property
def is_symbol(self):
return self._name == 'symbol'
@property
def is_any(self):
return self._name == 'any'
@property
def is_void(self):
return self._name == 'void'
class ReferenceType(IdlType, RefById):
"""
Represents a type specified with the given identifier.
    As the exact type definitions are unknown in early compilation phases, the
    reference is resolved at the very end of the compilation phases. Once
    everything is resolved, a ReferenceType behaves as a proxy to the resolved
    type.

    'typedef' in Web IDL is not a type, but we have TypedefType; the
    identifier may be resolved to a TypedefType.
"""
def __init__(self,
identifier,
is_optional=False,
extended_attributes=None,
debug_info=None,
ref_by_id_factory=None,
pass_key=None):
assert isinstance(ref_by_id_factory, RefByIdFactory)
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
ref_by_id_factory.init_subclass_instance(
self, identifier=identifier, debug_info=debug_info)
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.identifier == other.identifier)
def __hash__(self):
return hash(self.identifier)
def _unwrap(self, switches):
return self.target_object._unwrap(switches)
class DefinitionType(IdlType, WithIdentifier):
"""
Represents a spec-author-defined type, e.g. interface type and dictionary
type.
Typedef and union type are not included. They are represented as
TypedefType and UnionType respectively.
"""
def __init__(self, reference_type, user_defined_type, pass_key=None):
assert isinstance(reference_type, ReferenceType)
assert isinstance(user_defined_type, UserDefinedType)
IdlType.__init__(
self,
is_optional=reference_type.is_optional,
extended_attributes=reference_type.extended_attributes,
debug_info=reference_type.debug_info,
pass_key=pass_key)
WithIdentifier.__init__(self, user_defined_type.identifier)
self._type_definition_object = user_defined_type
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.identifier == other.identifier)
def __hash__(self):
return hash(self.identifier)
@property
def syntactic_form(self):
return self._format_syntactic_form(self.identifier)
@property
def type_name_without_extended_attributes(self):
return self.identifier
@property
def does_include_nullable_or_dict(self):
return self.is_dictionary
@property
def is_interface(self):
return self.type_definition_object.is_interface
@property
def is_callback_interface(self):
return self.type_definition_object.is_callback_interface
@property
def is_dictionary(self):
return self.type_definition_object.is_dictionary
@property
def is_enumeration(self):
return self.type_definition_object.is_enumeration
@property
def is_callback_function(self):
return self.type_definition_object.is_callback_function
@property
def type_definition_object(self):
return self._type_definition_object
class TypedefType(IdlType, WithIdentifier):
"""
Represents a typedef definition as an IdlType.
    'typedef' in Web IDL is not a type; however, there are use cases that are
    interested in typedef names. Thus, the IDL compiler does not resolve
    typedefs transparently (i.e. it does not remove typedefs entirely), and
    IdlTypes representing typedefs remain and behave like NullableType. You
    can track down the typedef'ed type through |original_type|.
"""
def __init__(self, reference_type, typedef, pass_key=None):
assert isinstance(reference_type, ReferenceType)
assert isinstance(typedef, Typedef)
IdlType.__init__(
self,
is_optional=reference_type.is_optional,
extended_attributes=reference_type.extended_attributes,
debug_info=reference_type.debug_info,
pass_key=pass_key)
WithIdentifier.__init__(self, typedef.identifier)
self._typedef = typedef
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.identifier == other.identifier)
def __hash__(self):
return hash(self.identifier)
@property
def syntactic_form(self):
return self._format_syntactic_form(self.identifier)
@property
def type_name_without_extended_attributes(self):
return self.original_type.type_name_without_extended_attributes
def apply_to_all_composing_elements(self, callback):
try:
callback(self)
except StopIteration:
return
self.original_type.apply_to_all_composing_elements(callback)
@property
def effective_annotations(self):
original_annotations = self.original_type.effective_annotations
if not self.extended_attributes:
return original_annotations
if not original_annotations:
return self.extended_attributes
return ExtendedAttributes(
list(self.extended_attributes) + list(original_annotations))
@property
def does_include_nullable_type(self):
return self.original_type.does_include_nullable_type
@property
def does_include_nullable_or_dict(self):
return self.original_type.does_include_nullable_or_dict
@property
def is_typedef(self):
return True
@property
def original_type(self):
return self._typedef.idl_type
def _unwrap(self, switches):
if switches['typedef']:
return self.original_type._unwrap(switches)
return self
class _ArrayLikeType(IdlType):
def __init__(self,
element_type,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(element_type, IdlType)
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
self._element_type = element_type
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.element_type == other.element_type)
def __hash__(self):
return hash((self.__class__, self.element_type))
def apply_to_all_composing_elements(self, callback):
try:
callback(self)
except StopIteration:
return
self.element_type.apply_to_all_composing_elements(callback)
@property
def element_type(self):
return self._element_type
class SequenceType(_ArrayLikeType):
"""https://heycam.github.io/webidl/#idl-sequence"""
def __init__(self,
element_type,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
_ArrayLikeType.__init__(
self,
element_type,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
@property
def syntactic_form(self):
return self._format_syntactic_form('sequence<{}>'.format(
self.element_type.syntactic_form))
@property
def type_name_without_extended_attributes(self):
return '{}Sequence'.format(self.element_type.type_name)
@property
def is_sequence(self):
return True
class FrozenArrayType(_ArrayLikeType):
"""https://heycam.github.io/webidl/#idl-frozen-array"""
def __init__(self,
element_type,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
_ArrayLikeType.__init__(
self,
element_type,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
@property
def syntactic_form(self):
return self._format_syntactic_form('FrozenArray<{}>'.format(
self.element_type.syntactic_form))
@property
def type_name_without_extended_attributes(self):
return '{}Array'.format(self.element_type.type_name)
@property
def is_frozen_array(self):
return True
class VariadicType(_ArrayLikeType):
"""Represents a type used for variadic arguments."""
def __init__(self, element_type, debug_info=None, pass_key=None):
_ArrayLikeType.__init__(
self, element_type, debug_info=debug_info, pass_key=pass_key)
@property
def syntactic_form(self):
assert not self.extended_attributes
assert not self.is_optional
return '{}...'.format(self.element_type.syntactic_form)
@property
def type_name_without_extended_attributes(self):
# Blink-specific expansion of type name
# The type name of a variadic type is the concatenation of the type
# name of the element type and the string "Variadic".
assert not self.extended_attributes
return '{}Variadic'.format(self.element_type.type_name)
@property
def is_variadic(self):
return True
def _unwrap(self, switches):
if switches['variadic']:
return self.element_type._unwrap(switches)
return self
class RecordType(IdlType):
"""https://heycam.github.io/webidl/#idl-record"""
def __init__(self,
key_type,
value_type,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(key_type, IdlType)
assert isinstance(value_type, IdlType)
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
self._key_type = key_type
self._value_type = value_type
def __eq__(self, other):
return (IdlType.__eq__(self, other) and self.key_type == other.key_type
and self.value_type == other.value_type)
def __hash__(self):
return hash((self.__class__, self.key_type, self.value_type))
@property
def syntactic_form(self):
return self._format_syntactic_form('record<{}, {}>'.format(
self.key_type.syntactic_form, self.value_type.syntactic_form))
@property
def type_name_without_extended_attributes(self):
return '{}{}Record'.format(self.key_type.type_name,
self.value_type.type_name)
def apply_to_all_composing_elements(self, callback):
try:
callback(self)
except StopIteration:
return
self.key_type.apply_to_all_composing_elements(callback)
self.value_type.apply_to_all_composing_elements(callback)
@property
def is_record(self):
return True
@property
def key_type(self):
return self._key_type
@property
def value_type(self):
return self._value_type
class PromiseType(IdlType):
"""https://heycam.github.io/webidl/#idl-promise"""
def __init__(self,
result_type,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(result_type, IdlType)
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
self._result_type = result_type
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.result_type == other.result_type)
def __hash__(self):
return hash((self.__class__, self.result_type))
@property
def syntactic_form(self):
return self._format_syntactic_form('Promise<{}>'.format(
self.result_type.syntactic_form))
@property
def type_name_without_extended_attributes(self):
return '{}Promise'.format(self.result_type.type_name)
def apply_to_all_composing_elements(self, callback):
try:
callback(self)
except StopIteration:
return
self.result_type.apply_to_all_composing_elements(callback)
@property
def is_promise(self):
return True
@property
def result_type(self):
"""Returns the result type."""
return self._result_type
class UnionType(IdlType):
"""https://heycam.github.io/webidl/#idl-union"""
def __init__(self,
member_types,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(member_types, (list, tuple))
assert all(isinstance(member, IdlType) for member in member_types)
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
self._member_types = tuple(member_types)
self._union_definition_object = None
def __eq__(self, other):
"""
__eq__ is defined so that
(A or B) == (B or A)
but
(A? or B) != (A or B?),
(A or (B or C)) != ((A or B) or C), and
(A or B) != (A or C) where C is typedef'ed to B.
        In short, the order of member types is not taken into account, but
        everything else is. This is mostly consistent with the fact that
        X != Y where Y is typedef'ed to X.
"""
return (IdlType.__eq__(self, other)
and set(self.member_types) == set(other.member_types))
def __hash__(self):
return hash((self.__class__,
functools.reduce(lambda x, idl_type: x + hash(idl_type),
self.member_types, 0)))
@property
def syntactic_form(self):
return self._format_syntactic_form('({})'.format(' or '.join(
[member.syntactic_form for member in self.member_types])))
@property
def type_name_without_extended_attributes(self):
return 'Or'.join([member.type_name for member in self.member_types])
def apply_to_all_composing_elements(self, callback):
try:
callback(self)
except StopIteration:
return
for member_type in self.member_types:
member_type.apply_to_all_composing_elements(callback)
@property
def does_include_nullable_type(self):
return any(
member.does_include_nullable_type for member in self.member_types)
@property
def does_include_nullable_or_dict(self):
return any(member.does_include_nullable_or_dict
for member in self.member_types)
@property
def is_union(self):
return True
@property
def member_types(self):
return self._member_types
@property
def flattened_member_types(self):
def flatten(idl_type):
if idl_type.is_union:
return functools.reduce(
lambda x, idl_type: x + flatten(idl_type),
idl_type.member_types, [])
elif idl_type.is_typedef:
return flatten(idl_type.original_type)
elif idl_type.is_nullable:
return flatten(idl_type.inner_type)
else:
return [idl_type]
return set(flatten(self))
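    # Illustration (not in the original source): a union written as
    # (long or (DOMString? or object)) flattens to {long, DOMString, object};
    # the nested union and the nullable wrapper on DOMString are removed.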
@property
def union_definition_object(self):
return self._union_definition_object
def set_union_definition_object(self, union_definition_object):
# In Python2, we need to avoid circular imports.
from .union import Union
assert isinstance(union_definition_object, Union)
assert self._union_definition_object is None
self._union_definition_object = union_definition_object
class NullableType(IdlType):
"""https://heycam.github.io/webidl/#idl-nullable-type"""
def __init__(self,
inner_type,
is_optional=False,
extended_attributes=None,
debug_info=None,
pass_key=None):
assert isinstance(inner_type, IdlType)
IdlType.__init__(
self,
is_optional=is_optional,
extended_attributes=extended_attributes,
debug_info=debug_info,
pass_key=pass_key)
self._inner_type = inner_type
def __eq__(self, other):
return (IdlType.__eq__(self, other)
and self.inner_type == other.inner_type)
def __hash__(self):
return hash((self.__class__, self.inner_type))
@property
def syntactic_form(self):
assert not self.extended_attributes
return '{}?'.format(self.inner_type.syntactic_form)
@property
def type_name_without_extended_attributes(self):
# https://heycam.github.io/webidl/#idl-annotated-types
        # Web IDL does not seem to cover the case of [X] ([Y] Type)?, i.e.
        # something like [X] nullable<[Y] Type>, which should turn into
        # "TypeYOrNullX".
#
# In case of '[Clamp] long?', it's interpreted as '([Clamp] long)?' but
# the type name must be "LongOrNullClamp" instead of "LongClampOrNull".
assert not self.extended_attributes
return '{}OrNull'.format(
self.inner_type.type_name_without_extended_attributes)
def apply_to_all_composing_elements(self, callback):
try:
callback(self)
except StopIteration:
return
self.inner_type.apply_to_all_composing_elements(callback)
@property
def effective_annotations(self):
assert not self.extended_attributes
return self.inner_type.effective_annotations
@property
def does_include_nullable_type(self):
return True
@property
def does_include_nullable_or_dict(self):
return True
@property
def is_nullable(self):
return True
@property
def inner_type(self):
return self._inner_type
def _unwrap(self, switches):
if switches['nullable']:
return self.inner_type._unwrap(switches)
return self
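# A minimal, self-contained sketch illustrating the semantics documented in
# UnionType.__eq__ and UnionType.flattened_member_types. _FakeSimple and
# _FakeUnion are hypothetical stand-ins, not the real IdlType API; they only
# show that member order is ignored for equality while nesting is preserved,
# and that flattening unwraps nested unions.
def _union_semantics_sketch():
    class _FakeSimple(object):
        def __init__(self, name):
            self.name = name
        def __eq__(self, other):
            return isinstance(other, _FakeSimple) and self.name == other.name
        def __hash__(self):
            return hash(self.name)
    class _FakeUnion(object):
        def __init__(self, *members):
            self.members = tuple(members)
        def __eq__(self, other):
            return (isinstance(other, _FakeUnion)
                    and set(self.members) == set(other.members))
        def __hash__(self):
            return hash(frozenset(self.members))
        def flattened(self):
            result = set()
            for member in self.members:
                if isinstance(member, _FakeUnion):
                    result |= member.flattened()
                else:
                    result.add(member)
            return result
    a, b, c = _FakeSimple('A'), _FakeSimple('B'), _FakeSimple('C')
    assert _FakeUnion(a, b) == _FakeUnion(b, a)  # (A or B) == (B or A)
    assert _FakeUnion(a, _FakeUnion(b, c)) != _FakeUnion(_FakeUnion(a, b), c)
    assert _FakeUnion(a, _FakeUnion(b, c)).flattened() == {a, b, c}
    return True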
|
|
import os
import logging
from datetime import datetime, timedelta
import requests
from flask import Flask, request, session, url_for, redirect, abort, jsonify, render_template
app = Flask(__name__)
app.secret_key = os.environ['FLASK_SECRET_KEY']
logging.basicConfig(level='DEBUG')
GITLAB_HOST = os.environ['GITLAB_HOST']
GITLAB_APPID = os.environ['GITLAB_APPID']
GITLAB_APP_SECRET = os.environ['GITLAB_APP_SECRET']
# DATE_TAGS maps an issue label to the number of hours before the due date at
# which work on the issue is expected to start (see the sketch at the end of
# this module).
DATE_TAGS = {
'0.25D': 2,
'0.5D': 5,
'1D': 24,
'2D': 48,
}
DATE_FORMAT = '%Y-%m-%d'
@app.errorhandler(401)
def not_login_handler(error):
url = GITLAB_HOST + '/oauth/authorize?client_id={client_id}&redirect_uri={redirect_uri}&response_type=code'
auth_url = url_for('.index', _external=True)
url = url.format(client_id=GITLAB_APPID, redirect_uri=auth_url)
return redirect(url)
class GitlabToken(object):
def __init__(self, code=None, token_json=None):
logging.debug('instance gitlab token. code: %s, token_json:%s' % (code, token_json))
if code:
data = self._auth(code)
else:
data = token_json
self.token_json = data
self.access_token = data['access_token']
self.expires_at = datetime.now() + timedelta(seconds=7000)
self.refresh_token = data['refresh_token']
def __str__(self):
return "<access_token: %s, expires_at: %s>" % (self.access_token, self.expires_at)
def _auth(self, code):
url = GITLAB_HOST + '/oauth/token'
params = {
"client_id": GITLAB_APPID,
"client_secret": GITLAB_APP_SECRET,
"code": code,
"grant_type": "authorization_code",
"redirect_uri": url_for('.index', _external=True)
}
r = requests.post(url, params=params)
        logging.debug('result: %s %s %s %s', url, params, r.content, r.status_code)
if r.status_code != 200:
abort(400)
return r.json()
def _refresh_token(self):
url = GITLAB_HOST + '/oauth/token'
params = {
"refresh_token": self.refresh_token,
"grant_type": "refresh_token",
"scope": "api"
}
r = requests.post(url, params=params)
        logging.debug('result: %s %s %s %s', url, params, r.content, r.status_code)
if r.status_code != 200:
abort(400)
return r.json()
def is_valid(self):
return self.access_token and self.expires_at and datetime.now() < self.expires_at
def refresh(self):
data = self._refresh_token()
self.access_token = data['access_token']
self.expires_at = datetime.now() + timedelta(seconds=7000)
self.refresh_token = data['refresh_token']
def get_token_or_refresh(self):
if not self.is_valid():
            self.refresh()  # note: refresh_token is a string attribute, not a method
return self.access_token
@classmethod
def get_instance(cls):
code = request.args.get('code')
token_json = session.get('token_json')
logging.debug('token: %s' % token_json)
if token_json:
token = GitlabToken(token_json=token_json)
elif code:
token = GitlabToken(code=code)
session['token_json'] = token.token_json
else:
abort(401)
return token
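# A small sketch (fake token payload, no network calls) of how GitlabToken
# decides whether a cached token can still be used: expiry is tracked locally
# with a fixed ~7000 second window rather than the expires_in value returned
# by GitLab.
def _example_token_validity():
    fake_payload = {'access_token': 'tok', 'refresh_token': 'ref'}
    token = GitlabToken(token_json=fake_payload)
    assert token.is_valid()  # freshly built, still inside the window
    token.expires_at = datetime.now() - timedelta(seconds=1)
    assert not token.is_valid()  # past the window, refresh() is needed
    return token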
@app.route('/')
def index():
token = GitlabToken.get_instance()
url = GITLAB_HOST + '/api/v3/groups'
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
data = reversed(r.json())
logging.debug('groups: %s' % r.content.decode())
current_group_id = r.json()[0]['id'] if 'current_group_id' not in session else session['current_group_id']
return render_template('index.html', groups=data, current_group_id=int(current_group_id))
@app.route('/milestones')
def api_milestones():
token = GitlabToken.get_instance()
url = GITLAB_HOST + '/api/v3/projects'
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
milestones = []
for project in r.json():
url = GITLAB_HOST + '/api/v3/projects/%s/milestones' % project['id']
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
logging.debug('milestones: %s' % r.content)
if r.json():
milestones += r.json()
return jsonify(milestones)
@app.route('/api/calendar')
def api_calendar():
current_group_id = request.args.get('current_group_id')
session['current_group_id'] = current_group_id
events = []
token = GitlabToken.get_instance()
url = GITLAB_HOST + '/api/v3/groups/%s/issues?per_page=100&state=all' % current_group_id
logging.debug('url: %s' % url)
r = requests.get(url, headers={
"Authorization": "Bearer " + token.access_token
})
logging.debug('result issues: %s' % r.content.decode())
for issue in r.json():
data = {
"title": issue.get('title'),
"start": issue.get('created_at')[:10],
"allDay": True,
}
if issue.get('assignee'):
data['title'] += ' <i class="fa fa-user" aria-hidden="true"></i>%s' % issue['assignee']['name']
if issue.get('state') == 'closed':
data['backgroundColor'] = '#00a65a'
data['borderColor'] = '#00a65a'
due_date = issue.get('due_date')
if due_date:
due_date_time = datetime.strptime(due_date, DATE_FORMAT) + timedelta(hours=24)
data["end"] = due_date
labels = issue.get('labels')
if labels:
for label in labels:
date_tag = DATE_TAGS.get(label)
if date_tag:
fixed_start = due_date_time - timedelta(hours=date_tag)
fixed_start = fixed_start.strftime(DATE_FORMAT)
data['start'] = fixed_start
data['title'] += ' <i class="fa fa-clock-o" aria-hidden="true"></i>' + label
break
else:
data['backgroundColor'] = '#ad8d43'
data['borderColor'] = '#ad8d43'
else:
data['backgroundColor'] = '#ad8d43'
data['borderColor'] = '#ad8d43'
if issue.get('state') != 'closed':
if datetime.now() > due_date_time:
data['backgroundColor'] = '#f56954'
data['borderColor'] = '#f56954'
events.append(data)
return jsonify(events)
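# A small standalone sketch (not used by the routes above; dates are
# illustrative) showing how the DATE_TAGS mapping shifts an event's start date
# back from its due date, mirroring the label handling in api_calendar().
def _example_label_window():
    due_date = '2020-03-10'
    label = '1D'  # start 24 hours before the end of the due date
    due_date_time = datetime.strptime(due_date, DATE_FORMAT) + timedelta(hours=24)
    start = (due_date_time - timedelta(hours=DATE_TAGS[label])).strftime(DATE_FORMAT)
    assert start == '2020-03-10'
    return start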
|
|
from __future__ import print_function
import sqlite3
import matplotlib
import pandas as pd
import flask
from sys import maxint
import os
# http://stackoverflow.com/questions/2801882/generating-a-png-with-matplotlib-when-display-is-undefined#3054314
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
def get_db():
db = getattr(flask.g, '_database', None)
if db is None:
db = flask.g._database = sqlite3.connect(os.path.normpath(os.path.dirname(__file__) + '/../data/db.sqlite'))
return db
def _to_abs_position(chromosome, location):
location = int(location)
r = get_db().execute('SELECT shift, length FROM chromosome WHERE chr_name = ?', (chromosome,)).fetchone()
shift = r[0]
length = r[1]
# clamp to max length
if location > length:
location = length
return shift + location
def chromosome_get():
chromosomes = pd.read_sql('select * from chromosome order by rowid', con=get_db())
r = chromosomes.to_json(orient='records')
return flask.Response(r, mimetype='application/json')
def manhattan_meta_get(geq_significance=None):
import numpy as np
chromosomes = pd.read_sql('select * from chromosome order by rowid', con=get_db()).to_records()
def to_meta(chromosome):
return dict(name=chromosome.chr_name, shift=long(chromosome.shift), start=long(chromosome.start + chromosome.shift),
end=long(chromosome.end + chromosome.shift))
x_max = chromosomes[-1].length + chromosomes[-1].shift
r = get_db().execute('SELECT min(pval) FROM snp WHERE pval <= ?',
(sig2pval(geq_significance) if geq_significance is not None else 1,)).fetchone()
y_max = float(-np.log10(r[0]))
return dict(ylim=[0, y_max], xlim=[0, long(x_max)],
chromosomes=[to_meta(d) for d in chromosomes])
def sig2pval(sig):
import numpy as np
r = float(np.exp(-sig))
return r
def manhattan_get(width=None, height=None, geq_significance=None, plain=None):
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO
import os.path
file_name = 'manhattan_{w}_{h}_{s}_{p}.png'.format(w=width, h=height, s=geq_significance, p=plain)
cache_key = os.path.normpath(os.path.dirname(__file__) + '/../local_cache/' + file_name)
if os.path.isfile(cache_key):
        with open(cache_key, 'rb') as f:  # read the cached PNG bytes
buffer = BytesIO()
buffer.write(f.read())
buffer.seek(0)
return flask.send_file(buffer, as_attachment=False,
attachment_filename='manhattan.png',
mimetype='image/png')
# http://stackoverflow.com/questions/37463184/how-to-create-a-manhattan-plot-with-matplotlib-in-python
chromosomes = pd.read_sql('select * from chromosome order by rowid', con=get_db()).to_records()
fig = plt.figure()
ax = fig.add_subplot(111)
colors = ['0.9', '0.6'] # http://matplotlib.org/api/colors_api.html
x_labels = []
x_labels_pos = []
maxes = []
for num, chromosome in enumerate(chromosomes):
query = 'select s.*, (s.chrom_start + c.shift) as abs_location from snp s left join chromosome c on s.chr_name = c.chr_name where pval <= ? and s.chr_name = ? order by abs_location'
group = pd.read_sql(query,
params=(sig2pval(geq_significance) if geq_significance is not None else 1, chromosome.chr_name),
con=get_db())
# -log_10(pvalue)
group['minuslog10pvalue'] = -np.log10(group.pval)
# print(num, chromosome.chr_name, len(group) if group is not None else 0)
if group is not None and not group.empty:
maxes.append(np.max(group.minuslog10pvalue))
plt.scatter(x=group.abs_location, y=group.minuslog10pvalue, color=colors[num % len(colors)], s=10)
x_labels.append(chromosome.chr_name)
# center
x_labels_pos.append(chromosome.start + chromosome.shift + chromosome.length / 2)
x_max = chromosomes[-1].length + chromosomes[-1].shift
ax.set_xlim([0, x_max])
ax.set_ylim([0, np.max(maxes)])
if plain:
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
else:
ax.set_xticks(x_labels_pos)
ax.set_xticklabels(x_labels)
ax.set_xlabel('Chromosome')
# write to memory
buffer = BytesIO()
fig.set_size_inches((width or 160) / 50, (height or 90) / 50)
args = dict(dpi=50)
if plain:
args['bbox_inches'] = 'tight'
args['pad_inches'] = 0
plt.savefig(buffer, **args)
buffer.seek(0)
plt.savefig(cache_key, **args)
print('generated image')
result = flask.send_file(buffer, as_attachment=False,
attachment_filename='manhattan.png',
mimetype='image/png')
return result
def _to_snp_query(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint, geq_significance=None,
leq_significance=None):
params = []
q = 'from snp s left join chromosome c on s.chr_name = c.chr_name '
append = 'where'
if to_chromosome is not None and from_chromosome is None:
abs_to = _to_abs_position(to_chromosome, to_location)
q += append + ' (s.chrom_start + c.shift) <= ?'
append = 'and'
params.append(abs_to)
elif from_chromosome is not None:
abs_from = _to_abs_position(from_chromosome, from_location)
abs_to = _to_abs_position(to_chromosome or from_chromosome, to_location)
q += append + ' (s.chrom_start + c.shift) between ? and ?'
append = 'and'
params.extend((abs_from, abs_to))
    if geq_significance is not None or leq_significance is not None:
q += append + ' (pval between ? and ?)'
append = 'and'
params.extend((0 if leq_significance is None else sig2pval(leq_significance),
1 if geq_significance is None else sig2pval(geq_significance)))
return q, params
def data_get(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint, geq_significance=None,
leq_significance=None):
query_from_where, params = _to_snp_query(from_chromosome, from_location, to_chromosome, to_location, geq_significance,
leq_significance)
query = 'select s.*, (s.chrom_start + c.shift) as abs_chrom_start ' + query_from_where + ' order by abs_chrom_start'
data = pd.read_sql(query, params=params, con=get_db())
r = data.to_json(orient='records')
return flask.Response(r, mimetype='application/json')
def data_count_get(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint, geq_significance=None,
leq_significance=None):
query_from_where, params = _to_snp_query(from_chromosome, from_location, to_chromosome, to_location, geq_significance,
leq_significance)
query = 'select count(*) ' + query_from_where
count = get_db().execute(query, params).fetchone()[0]
return count
def _to_exon_query(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint, ):
params = []
q = 'from exon s left join chromosome c on s.chr_name = c.chr_name '
append = 'where'
if to_chromosome is not None and from_chromosome is None:
abs_to = _to_abs_position(to_chromosome, to_location)
q += append + ' (s.end + c.shift) <= ?'
append = 'and'
params.append(abs_to)
elif from_chromosome is not None:
abs_from = _to_abs_position(from_chromosome, from_location)
abs_to = _to_abs_position(to_chromosome or from_chromosome, to_location)
q += append + ' (s.start + c.shift) >= ? and (s.end + c.shift) <= ?'
append = 'and'
params.extend((abs_from, abs_to))
return q, params
def exon_get(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint):
query_from_where, params = _to_exon_query(from_chromosome, from_location, to_chromosome, to_location)
query = 'select s.*, (s.start + c.shift) as abs_start, (s.end + c.shift) as abs_end ' + query_from_where + ' order by abs_start'
data = pd.read_sql(query, params=params, con=get_db())
r = data.to_json(orient='records')
return flask.Response(r, mimetype='application/json')
def exon_count_get(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint):
query_from_where, params = _to_exon_query(from_chromosome, from_location, to_chromosome, to_location)
query = 'select count(*) ' + query_from_where
count = get_db().execute(query, params).fetchone()[0]
return count
def _gene_exon_get(query, params):
from json import dumps
data = pd.read_sql(query, params=params, con=get_db())
grouped = data.groupby('gene_name')
gene_list = []
for name, group in grouped:
exons = group.loc[:, ['start', 'end', 'abs_start', 'abs_end']]
# build base data
gene = dict(gene_name=name, strand=group.strand.min(), start=group.start.min(), abs_start=group.abs_start.min(),
end=group.end.max(), abs_end=group.abs_end.max(), exons='EXONS')
gene_string = dumps(gene)
# use fast pandas to string for the detail data
gene_string = gene_string.replace('"EXONS"', exons.to_json(orient='records'))
gene_list.append(gene_string)
return '[' + ','.join(gene_list) + ']'
def gene_get(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint, with_exons=False):
query_from_where, params = _to_exon_query(from_chromosome, from_location, to_chromosome, to_location)
if with_exons:
query = 'select s.*, (s.start + c.shift) as abs_start, (s.end + c.shift) as abs_end ' + query_from_where + ' order by abs_start'
r = _gene_exon_get(query, params)
else:
query = """select s.gene_name, s.strand, min(s.start) as start, max(s.end) as end, min(s.start + c.shift) as abs_start, max(s.end + c.shift) as abs_end
{from_where}
group by s.gene_name order by s.gene_name, s.strand""" \
.format(from_where=query_from_where)
data = pd.read_sql(query, params=params, con=get_db())
r = data.to_json(orient='records')
return flask.Response(r, mimetype='application/json')
def gene_exon_get(gene_name):
query = """
select s.*, (s.start + c.shift) as abs_start, (s.end + c.shift) as abs_end
from exon s left join chromosome c on s.chr_name = c.chr_name
where s.gene_name in ("{names}") order by abs_start
""".format(names='","'.join(gene_name))
r = _gene_exon_get(query, ())
return flask.Response(r, mimetype='application/json')
def gene_count_get(from_chromosome=None, from_location=0, to_chromosome=None, to_location=maxint):
query_from_where, params = _to_exon_query(from_chromosome, from_location, to_chromosome, to_location)
query = 'select count(*) from (select s.gene_name, s.strand ' + query_from_where + ' group by s.gene_name, s.strand)'
count = get_db().execute(query, params).fetchone()[0]
return count
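# A small self-contained sketch (not wired into the API) showing how a
# significance threshold maps to a p-value via sig2pval and how _to_snp_query
# builds the WHERE clause; with no chromosome bounds it never touches the
# database, so this runs without a connection.
def _example_significance_filter():
    threshold = sig2pval(3)  # exp(-3), roughly 0.0498
    assert 0.049 < threshold < 0.050
    query_from_where, params = _to_snp_query(geq_significance=3)
    assert 'pval between ? and ?' in query_from_where
    assert params == [0, threshold]
    return query_from_where, params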
|
|
# -*- coding: utf-8 -*-
"""
Tests for the standard library PEP 543 shim.
"""
import pep543
import pep543.stdlib
import pytest
from .backend_tests import SimpleNegotiation
CONTEXTS = (
pep543.stdlib.STDLIB_BACKEND.client_context,
pep543.stdlib.STDLIB_BACKEND.server_context
)
def wrap_buffers(context):
"""
A convenient helper that calls wrap_buffers with the appropriate number of
arguments.
"""
if isinstance(context, pep543.stdlib.STDLIB_BACKEND.client_context):
return context.wrap_buffers(server_hostname=None)
else:
return context.wrap_buffers()
class TestSimpleNegotiationStdlib(SimpleNegotiation):
BACKEND = pep543.stdlib.STDLIB_BACKEND
class TestStdlibErrorHandling(object):
"""
Validate that the stdlib backend can do sensible error handling in specific
situations that it cannot handle.
"""
@pytest.mark.parametrize(
'lowest,highest', (
(object(), None), (None, object()), (object(), object())
)
)
@pytest.mark.parametrize('context', CONTEXTS)
def test_bad_values_for_versions_client(self, lowest, highest, context):
"""
Using TLSConfiguration objects with a bad value for their minimum or
maximum version raises a TLSError with Client contexts.
"""
config = pep543.TLSConfiguration(
validate_certificates=False,
lowest_supported_version=lowest,
highest_supported_version=highest
)
ctx = context(config)
with pytest.raises(pep543.TLSError):
wrap_buffers(ctx)
@pytest.mark.parametrize('context', CONTEXTS)
def test_no_supported_cipher_suites(self, context):
"""
Using TLSConfiguration objects that have only unsupported cipher suites
raises a TLSError.
"""
# We assume that no cipher suite will be defined with the code eeee.
config = pep543.TLSConfiguration(
ciphers=[0xeeee],
trust_store=pep543.stdlib.STDLIB_BACKEND.trust_store.system()
)
ctx = context(config)
with pytest.raises(pep543.TLSError) as e:
wrap_buffers(ctx)
assert "supported ciphers" in str(e)
class TestStdlibImplementation(object):
"""
Tests that ensure that specific implementation details of the stdlib shim
work the way we want.
"""
@pytest.mark.parametrize('context', CONTEXTS)
def test_system_trust_store_loads(self, monkeypatch, context):
"""
When a context is instructed to load the system trust store, it calls
load_default_certs.
"""
calls = 0
def load_default_certs(*args):
nonlocal calls
calls += 1
monkeypatch.setattr(
'ssl.SSLContext.load_default_certs', load_default_certs
)
config = pep543.TLSConfiguration(
trust_store=pep543.stdlib.STDLIB_BACKEND.trust_store.system()
)
ctx = context(config)
wrap_buffers(ctx)
assert calls == 1
@pytest.mark.parametrize('context', CONTEXTS)
def test_unknown_cipher_suites(self, monkeypatch, context):
"""
When a buffer object returns a cipher that doesn't appear to be
        supported by the given OpenSSL implementation, a TLSError is raised.
"""
def unknown_cipher(*args):
return ('not_a_tls_cipher_suite', None, None)
monkeypatch.setattr('ssl.SSLObject.cipher', unknown_cipher)
config = pep543.TLSConfiguration(
trust_store=pep543.stdlib.STDLIB_BACKEND.trust_store.system()
)
ctx = context(config)
buffer = wrap_buffers(ctx)
with pytest.raises(pep543.TLSError):
buffer.cipher()
@pytest.mark.parametrize('context', CONTEXTS)
def test_cipher_suite_not_in_enum(self, monkeypatch, context):
"""
When a buffer object returns a cipher that is not in the PEP543
CipherSuite enum object, it returns the cipher suite ID instead.
"""
# We try this with the cipher suite NULL-MD5. This may not work on all
# OpenSSL versions, so if this test breaks investigate and maybe change
# to a different one.
EXTRA_CIPHER_ID = 0x0001
EXTRA_CIPHER_NAME = 'NULL-MD5'
def md5_cipher(*args):
return (EXTRA_CIPHER_NAME, None, None)
monkeypatch.setattr('ssl.SSLObject.cipher', md5_cipher)
cipher_list = pep543.DEFAULT_CIPHER_LIST + [EXTRA_CIPHER_ID]
config = pep543.TLSConfiguration(
trust_store=pep543.stdlib.STDLIB_BACKEND.trust_store.system(),
ciphers=cipher_list
)
ctx = context(config)
buffer = wrap_buffers(ctx)
suite = buffer.cipher()
assert not isinstance(suite, pep543.CipherSuite)
assert suite == EXTRA_CIPHER_ID
class TestStdlibProtocolNegotiation(object):
"""
Tests that validate the standard library's protocol negotiation semantics.
"""
def assert_negotiated_protocol(self, context, negotiated_protocol):
"""
Test that the protocol negotiated is as expected.
"""
if negotiated_protocol is not None:
negotiated_protocol = pep543.NextProtocol(negotiated_protocol)
config = pep543.TLSConfiguration(
validate_certificates=False,
inner_protocols=(pep543.NextProtocol.H2,)
)
ctx = context(config)
buffer = wrap_buffers(ctx)
assert (buffer.negotiated_protocol() == negotiated_protocol)
@pytest.mark.parametrize('context', CONTEXTS)
def test_works_with_just_npn(self, monkeypatch, context):
"""
If ALPN is not present, protocol negotiation will fall back to NPN.
"""
negotiated_protocol = b'h2'
def notimplemented(*args):
raise NotImplementedError()
def ignored(*args):
pass
def negotiated(*args):
return negotiated_protocol.decode('utf-8')
monkeypatch.setattr(
'ssl.SSLContext.set_alpn_protocols', notimplemented
)
monkeypatch.setattr(
'ssl.SSLObject.selected_alpn_protocol', ignored
)
monkeypatch.setattr('ssl.SSLContext.set_npn_protocols', ignored)
monkeypatch.setattr('ssl.SSLObject.selected_npn_protocol', negotiated)
self.assert_negotiated_protocol(context, negotiated_protocol)
@pytest.mark.parametrize('context', CONTEXTS)
def test_works_with_just_alpn(self, monkeypatch, context):
"""
If NPN is not present, protocol negotiation will just use ALPN.
"""
negotiated_protocol = b'h2'
def notimplemented(*args):
raise NotImplementedError()
def ignored(*args):
pass
def negotiated(*args):
return negotiated_protocol.decode('utf-8')
monkeypatch.setattr('ssl.SSLContext.set_alpn_protocols', ignored)
monkeypatch.setattr(
'ssl.SSLObject.selected_alpn_protocol', negotiated
)
monkeypatch.setattr('ssl.SSLContext.set_npn_protocols', notimplemented)
monkeypatch.setattr('ssl.SSLObject.selected_npn_protocol', ignored)
self.assert_negotiated_protocol(context, negotiated_protocol)
@pytest.mark.parametrize('context', CONTEXTS)
def test_prefers_alpn(self, monkeypatch, context):
"""
If both NPN and ALPN are present, ALPN is preferred to NPN.
"""
negotiated_protocol = b'h2'
def ignored(*args):
pass
def negotiated(*args):
return negotiated_protocol.decode('utf-8')
def wrong(*args):
return b'this is not right'
monkeypatch.setattr('ssl.SSLContext.set_alpn_protocols', ignored)
monkeypatch.setattr(
'ssl.SSLObject.selected_alpn_protocol', negotiated
)
monkeypatch.setattr('ssl.SSLContext.set_npn_protocols', ignored)
monkeypatch.setattr('ssl.SSLObject.selected_npn_protocol', wrong)
self.assert_negotiated_protocol(context, negotiated_protocol)
@pytest.mark.parametrize('context', CONTEXTS)
def test_no_protocols(self, monkeypatch, context):
"""
If neither NPN nor ALPN are present, no protocol is negotiated.
"""
negotiated_protocol = None
def ignored(*args):
pass
monkeypatch.setattr('ssl.SSLContext.set_alpn_protocols', ignored)
monkeypatch.setattr(
'ssl.SSLObject.selected_alpn_protocol', ignored
)
monkeypatch.setattr('ssl.SSLContext.set_npn_protocols', ignored)
monkeypatch.setattr('ssl.SSLObject.selected_npn_protocol', ignored)
self.assert_negotiated_protocol(context, negotiated_protocol)
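# A standalone illustration of the monkeypatch call-counting pattern used in
# TestStdlibImplementation above, applied directly to the stdlib ssl module
# rather than through the PEP 543 shim. It assumes a CPython where
# ssl.create_default_context() loads the default CA certificates when no
# explicit CA file is given; it is safe to delete.
def test_monkeypatch_counting_pattern_example(monkeypatch):
    import ssl
    calls = []
    def fake_load_default_certs(*args, **kwargs):
        calls.append(args)
    monkeypatch.setattr(
        'ssl.SSLContext.load_default_certs', fake_load_default_certs
    )
    ssl.create_default_context()
    assert len(calls) == 1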
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2RModels for VRGripper env tasks."""
from typing import Callable, Dict, List, Optional, Text, Tuple
import gin
import numpy as np
from tensor2robot.layers import mdn
from tensor2robot.layers import vision_layers
from tensor2robot.meta_learning import meta_tfdata
from tensor2robot.models import abstract_model
from tensor2robot.models import regression_model
from tensor2robot.preprocessors import abstract_preprocessor
from tensor2robot.preprocessors import distortion
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v1 as tf # tf
import tensorflow_probability as tfp
from tensorflow.contrib import layers as contrib_layers
TensorSpec = tensorspec_utils.ExtendedTensorSpec
TRAIN = tf.estimator.ModeKeys.TRAIN
PREDICT = tf.estimator.ModeKeys.PREDICT
FLOAT_DTYPES = [tf.bfloat16, tf.float32, tf.float64]
@gin.configurable
class DefaultVRGripperPreprocessor(abstract_preprocessor.AbstractPreprocessor):
"""The default VRGripperEnv preprocessor."""
def __init__(self,
src_img_res = (220, 300),
crop_size = (200, 280),
mixup_alpha = 0.0,
**kwargs):
"""Construct the preprocessor.
Args:
src_img_res: The true height and width of the image data. If the model
expects images of a different size, we automatically resize the images.
crop_size: Before resizing the image, take a crop of the image to this
        height and width. This is a no-op if equal to src_img_res. The crop is
        taken at a random position at train time and from the center otherwise.
mixup_alpha: If > 0., turns on Mixup data augmentation for features and
labels.
**kwargs: Keyword args passed to parent class.
"""
super(DefaultVRGripperPreprocessor, self).__init__(**kwargs)
self._src_img_res = src_img_res
self._crop_size = crop_size
self._mixup_alpha = mixup_alpha
def get_in_feature_specification(self, mode
):
"""See base class."""
feature_spec = tensorspec_utils.copy_tensorspec(
self._model_feature_specification_fn(mode))
    # Don't parse the original_image feature from the input data; it is added
    # in preprocess_fn to satisfy the model's inputs.
if mode != PREDICT and 'original_image' in feature_spec:
del feature_spec['original_image']
if 'image' in feature_spec:
true_img_shape = feature_spec.image.shape.as_list()
# Overwrite the H, W dimensions.
true_img_shape[-3:-1] = self._src_img_res
feature_spec.image = TensorSpec.from_spec(
feature_spec.image, shape=true_img_shape, dtype=tf.uint8)
return tensorspec_utils.flatten_spec_structure(feature_spec)
def get_in_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def get_out_feature_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_feature_specification_fn(mode))
def get_out_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def _preprocess_fn(
self, features,
labels,
mode
):
"""Resize images and convert them from uint8 -> float32."""
if 'image' in features:
ndim = len(features.image.shape)
is_sequence = (ndim > 4)
input_size = self._src_img_res
target_size = self._crop_size
features.original_image = features.image
features.image = distortion.preprocess_image(features.image, mode,
is_sequence, input_size,
target_size)
features.image = tf.image.convert_image_dtype(features.image, tf.float32)
out_feature_spec = self.get_out_feature_specification(mode)
if out_feature_spec.image.shape != features.image.shape:
features.image = meta_tfdata.multi_batch_apply(
tf.image.resize_images, 2, features.image,
out_feature_spec.image.shape.as_list()[-3:-1])
if self._mixup_alpha > 0. and labels and mode == TRAIN:
lmbda = tfp.distributions.Beta(
self._mixup_alpha, self._mixup_alpha).sample()
for key, x in features.items():
if x.dtype in FLOAT_DTYPES:
features[key] = lmbda * x + (1-lmbda)*tf.reverse(x, axis=[0])
if labels is not None:
for key, x in labels.items():
if x.dtype in FLOAT_DTYPES:
labels[key] = lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0])
return features, labels
@gin.configurable
class VRGripperRegressionModel(regression_model.RegressionModel):
"""Continuous regression output model for VRGripper Env."""
def __init__(self,
use_gripper_input = True,
normalize_outputs = False,
output_mean = None,
output_stddev = None,
outer_loss_multiplier = 1.,
num_mixture_components = 1,
output_mixture_sample = False,
condition_mixture_stddev = False,
episode_length = 40,
**kwargs):
"""Initialize the VRGripperRegressionModel.
Args:
use_gripper_input: If True, concatenate gripper pose with input to the
fully connected layers when predicting actions.
normalize_outputs: If True, scale actions by `output_stddev` and
translate by `output_mean`.
output_mean: The empirical mean of demonstration actions.
output_stddev: The empirical standard deviation of demonstration actions.
outer_loss_multiplier: A scaling factor for the outer loss.
num_mixture_components: The number of gaussian mixture components. Use 1
for standard mean squared error regression.
output_mixture_sample: If True (and num_mixture_components > 1), output
actions by sampling from a gaussian mixture. Otherwise, we use the mean
of the most likely component.
condition_mixture_stddev: If True, the mixture standard deviations will be
output from a neural net and thus conditioned on image/state. Otherwise,
they will simply be learned variables (unconditioned on image/state).
episode_length: The fixed length of an episode in the data.
**kwargs: Passed to parent.
Raises:
ValueError: If `output_mean` or `output_stddev` have incorrect length.
"""
super(VRGripperRegressionModel, self).__init__(**kwargs)
self._use_gripper_input = use_gripper_input
self._normalize_outputs = normalize_outputs
self._output_mean = None
self._output_stddev = None
self._outer_loss_multiplier = outer_loss_multiplier
self._num_mixture_components = num_mixture_components
self._output_mixture_sample = output_mixture_sample
self._condition_mixture_stddev = condition_mixture_stddev
self._episode_length = episode_length
if output_mean and output_stddev:
if not len(output_mean) == len(output_stddev) == self.action_size:
        raise ValueError(
            'Output mean and stddev must both have length {:d} (the action '
            'size) but have lengths {:d} and {:d}.'.format(
                self.action_size, len(output_mean), len(output_stddev)))
self._output_mean = np.array([output_mean])
self._output_stddev = np.array([output_stddev])
@property
def default_preprocessor_cls(self):
return DefaultVRGripperPreprocessor
def get_feature_specification(self, mode):
del mode
image_spec = TensorSpec(
shape=(100, 100, 3),
dtype=tf.float32,
name='image0',
data_format='jpeg')
gripper_pose_spec = TensorSpec(
shape=(14,), dtype=tf.float32, name='world_pose_gripper')
tspec = tensorspec_utils.TensorSpecStruct(
image=image_spec, gripper_pose=gripper_pose_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
def get_label_specification(self, mode):
del mode
action_spec = TensorSpec(
shape=(self._action_size,), dtype=tf.float32, name='action_world')
tspec = tensorspec_utils.TensorSpecStruct(action=action_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
@property
def action_size(self):
return self._action_size
def _single_batch_a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE):
"""A state -> action regression function that expects a single batch dim."""
gripper_pose = features.gripper_pose if self._use_gripper_input else None
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
fc_input = tf.concat([feature_points, gripper_pose], -1)
outputs = {}
if self._num_mixture_components > 1:
dist_params = mdn.predict_mdn_params(
fc_input,
self._num_mixture_components,
self._action_size,
condition_sigmas=self._condition_mixture_stddev)
gm = mdn.get_mixture_distribution(
dist_params, self._num_mixture_components, self._action_size,
self._output_mean if self._normalize_outputs else None)
if self._output_mixture_sample:
# Output a mixture sample as action.
action = gm.sample()
else:
action = mdn.gaussian_mixture_approximate_mode(gm)
outputs['dist_params'] = dist_params
else:
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
fc_input, num_outputs=self._action_size)
        if self._output_mean is not None:
          # Un-normalize only when empirical output statistics were provided.
          action = self._output_mean + self._output_stddev * action
outputs.update({
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax']
})
return outputs
def a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE,
config=None,
params=None):
"""A (state) regression function.
This function can return a stochastic or a deterministic tensor.
Args:
features: This is the first item returned from the input_fn and parsed by
tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
requirements of the self.get_feature_spefication.
scope: String specifying variable scope.
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
context_fn: Optional python function that takes in features and returns
new features of same shape. For merging information like in RL^2.
reuse: Whether or not to reuse variables under variable scope 'scope'.
config: Optional configuration object. Will receive what is passed to
Estimator in config parameter, or the default config. Allows updating
things in your model_fn based on configuration such as num_ps_replicas,
or model_dir.
params: An optional dict of hyper parameters that will be passed into
input_fn and model_fn. Keys are names of parameters, values are basic
python types. There are reserved keys for TPUEstimator, including
'batch_size'.
Returns:
outputs: A {key: Tensor} mapping. The key 'action' is required.
"""
del config, params
return meta_tfdata.multi_batch_apply(self._single_batch_a_func, 2, features,
scope, mode, context_fn, reuse)
def loss_fn(self, labels, inference_outputs, mode, params=None):
"""This implements outer loss and configurable inner losses."""
if params and params.get('is_outer_loss', False):
pass
if self._num_mixture_components > 1:
gm = mdn.get_mixture_distribution(
inference_outputs['dist_params'], self._num_mixture_components,
self._action_size,
self._output_mean if self._normalize_outputs else None)
return -tf.reduce_mean(gm.log_prob(labels.action))
else:
return self._outer_loss_multiplier * tf.losses.mean_squared_error(
labels=labels.action,
predictions=inference_outputs['inference_output'])
@gin.configurable
class VRGripperDomainAdaptiveModel(VRGripperRegressionModel):
"""Base model which uses a learned loss to do domain adaptive imitation.
The model conditions on video only (no actions or gripper pose).
"""
def __init__(self,
predict_con_gripper_pose = False,
learned_loss_conv1d_layers = (10, 10,
6),
**kwargs):
"""Initialize the model.
Args:
predict_con_gripper_pose: If True, predict the condition gripper pose
input from the image features. Otherwise, set to zeros.
learned_loss_conv1d_layers: A tuple describing the conv1d layers of the
learned loss. If None, the learned loss won't use conv1d layers.
**kwargs: Passed to parent.
"""
super(VRGripperDomainAdaptiveModel, self).__init__(**kwargs)
self._predict_con_gripper_pose = predict_con_gripper_pose
self._learned_loss_conv1d_layers = learned_loss_conv1d_layers
def _predict_gripper_pose(self, feature_points):
"""Predict the condition gripper pose from feature points."""
out = feature_points
out = tf.layers.dense(out, 40, activation=tf.nn.relu, use_bias=False)
out = contrib_layers.layer_norm(out)
out = tf.layers.dense(out, 14, activation=None)
return out
def single_batch_a_func(
self, features, scope,
mode,
context_fn, reuse,
config,
params):
"""Single step action predictor when there is a single batch dim."""
del config
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
if params and params.get('is_inner_loop', False):
if self._predict_con_gripper_pose:
gripper_pose = self._predict_gripper_pose(feature_points)
else:
gripper_pose = tf.zeros_like(features.gripper_pose)
else:
gripper_pose = features.gripper_pose
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
feature_points, aux_input=gripper_pose, num_outputs=self._action_size)
      if self._output_mean is not None:
        # Un-normalize only when empirical output statistics were provided.
        action = self._output_mean + self._output_stddev * action
return {
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax'],
}
def a_func(self,
features,
scope,
mode,
context_fn = None,
reuse=tf.AUTO_REUSE,
config = None,
params = None
):
"""Single step action predictor. See parent class."""
return meta_tfdata.multi_batch_apply(self.single_batch_a_func, 2, features,
scope, mode, context_fn, reuse, config,
params)
def model_train_fn(self,
features,
labels,
inference_outputs,
mode,
config = None,
params = None
):
"""Output learned loss if inner loop, or behavior clone if outer loop."""
if params and params.get('is_outer_loss', False):
# Outer loss case: use standard RegressionModel loss.
return self.loss_fn(labels, inference_outputs, mode, params)
# Inner loss case: compute learned loss function.
with tf.variable_scope(
'learned_loss', reuse=tf.AUTO_REUSE, use_resource=True):
predicted_action, _ = meta_tfdata.multi_batch_apply(
vision_layers.BuildImageFeaturesToPoseModel,
2,
inference_outputs['feature_points'],
num_outputs=self._action_size)
if self._learned_loss_conv1d_layers is None:
return tf.losses.mean_squared_error(predicted_action,
inference_outputs['action'])
ll_input = tf.concat([
predicted_action, inference_outputs['feature_points'],
inference_outputs['inference_output']
], -1)
net = ll_input
for num_filters in self._learned_loss_conv1d_layers[:-1]:
net = tf.layers.conv1d(
net, num_filters, 10, activation=tf.nn.relu, use_bias=False)
net = contrib_layers.layer_norm(net)
net = tf.layers.conv1d(net, self._learned_loss_conv1d_layers[-1],
1) # 1x1 convolution.
return tf.reduce_mean(tf.reduce_sum(tf.square(net), axis=(1, 2)))
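# A small numpy-only sketch (not used by the models above) of the mixup
# interpolation applied in DefaultVRGripperPreprocessor._preprocess_fn: a
# Beta(alpha, alpha) coefficient blends each batch element with the batch in
# reversed order, which is what lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0])
# computes. The toy batch below is illustrative only.
def _mixup_sketch(alpha=0.2, seed=0):
  rng = np.random.RandomState(seed)
  lmbda = rng.beta(alpha, alpha)
  batch = np.arange(6, dtype=np.float32).reshape(3, 2)  # toy batch of 3 examples
  mixed = lmbda * batch + (1.0 - lmbda) * batch[::-1]
  # Each output row is a convex combination of a row and its mirror-image row.
  np.testing.assert_allclose(mixed[0], lmbda * batch[0] + (1 - lmbda) * batch[2])
  return mixed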
|
|
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import binascii
import struct
import shutil
import inspect
import sys
from copy import copy
from inspect import getmro
from collections import namedtuple, Mapping
from tools.targets.LPC import patch
from tools.paths import TOOLS_BOOTLOADERS
from tools.utils import json_file_to_dict
__all__ = ["target", "TARGETS", "TARGET_MAP", "TARGET_NAMES", "CORE_LABELS",
"HookError", "generate_py_target", "Target",
"CUMULATIVE_ATTRIBUTES", "get_resolution_order"]
CORE_LABELS = {
"Cortex-M0" : ["M0", "CORTEX_M", "LIKE_CORTEX_M0", "CORTEX"],
"Cortex-M0+": ["M0P", "CORTEX_M", "LIKE_CORTEX_M0", "CORTEX"],
"Cortex-M1" : ["M1", "CORTEX_M", "LIKE_CORTEX_M1", "CORTEX"],
"Cortex-M3" : ["M3", "CORTEX_M", "LIKE_CORTEX_M3", "CORTEX"],
"Cortex-M4" : ["M4", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M4", "CORTEX"],
"Cortex-M4F" : ["M4", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M4", "CORTEX"],
"Cortex-M7" : ["M7", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M7", "CORTEX"],
"Cortex-M7F" : ["M7", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M7", "CORTEX"],
"Cortex-M7FD" : ["M7", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M7", "CORTEX"],
"Cortex-A9" : ["A9", "CORTEX_A", "LIKE_CORTEX_A9", "CORTEX"],
"Cortex-M23": ["M23", "CORTEX_M", "LIKE_CORTEX_M23", "CORTEX"],
"Cortex-M23-NS": ["M23", "CORTEX_M", "LIKE_CORTEX_M23", "CORTEX"],
"Cortex-M33": ["M33", "CORTEX_M", "LIKE_CORTEX_M33", "CORTEX"],
"Cortex-M33-NS": ["M33", "CORTEX_M", "LIKE_CORTEX_M33", "CORTEX"]
}
################################################################################
# Generic Target class that reads and interprets the data in targets.json
class HookError(Exception):
""" A simple class that represents all the exceptions associated with
hooking
"""
pass
CACHES = {}
def cached(func):
"""A simple decorator used for automatically caching data returned by a
function
"""
def wrapper(*args, **kwargs):
"""The wrapped function itself"""
if not CACHES.has_key((func.__name__, args)):
CACHES[(func.__name__, args)] = func(*args, **kwargs)
return CACHES[(func.__name__, args)]
return wrapper
# Cumulative attributes can have values appended to them, so they
# need to be computed differently than regular attributes
CUMULATIVE_ATTRIBUTES = ['extra_labels', 'macros', 'device_has', 'features']
def get_resolution_order(json_data, target_name, order, level=0):
""" Return the order in which target descriptions are searched for
attributes. This mimics the Python 2.2 method resolution order, which
is what the old targets.py module used. For more details, check
http://makina-corpus.com/blog/metier/2014/python-tutorial-understanding-python-mro-class-search-path
The resolution order contains (name, level) tuples, where "name" is the
name of the class and "level" is the level in the inheritance hierarchy
(the target itself is at level 0, its first parent at level 1, its
parent's parent at level 2 and so on)
"""
# the resolution order can't contain duplicate target names
if target_name not in [l[0] for l in order]:
order.append((target_name, level))
parents = json_data[target_name].get("inherits", [])
for par in parents:
order = get_resolution_order(json_data, par, order, level + 1)
return order
def target(name, json_data):
"""Construct a target object"""
resolution_order = get_resolution_order(json_data, name, [])
resolution_order_names = [tgt for tgt, _ in resolution_order]
return Target(name=name,
json_data={key: value for key, value in json_data.items()
if key in resolution_order_names},
resolution_order=resolution_order,
resolution_order_names=resolution_order_names)
def generate_py_target(new_targets, name):
"""Add one or more new target(s) represented as a Python dictionary
in 'new_targets'. It is an error to add a target with a name that
already exists.
"""
base_targets = Target.get_json_target_data()
for new_target in new_targets.keys():
if new_target in base_targets:
raise Exception("Attempt to add target '%s' that already exists"
% new_target)
total_data = {}
total_data.update(new_targets)
total_data.update(base_targets)
return target(name, total_data)
class Target(namedtuple("Target", "name json_data resolution_order resolution_order_names")):
"""An object to represent a Target (MCU/Board)"""
# Default location of the 'targets.json' file
__targets_json_location_default = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', 'targets', 'targets.json')
# Current/new location of the 'targets.json' file
__targets_json_location = None
# Extra custom targets files
__extra_target_json_files = []
@staticmethod
@cached
def get_json_target_data():
"""Load the description of JSON target data"""
targets = json_file_to_dict(Target.__targets_json_location or
Target.__targets_json_location_default)
for extra_target in Target.__extra_target_json_files:
for k, v in json_file_to_dict(extra_target).iteritems():
if k in targets:
print 'WARNING: Custom target "%s" cannot replace existing target.' % k
else:
targets[k] = v
return targets
@staticmethod
def add_extra_targets(source_dir):
extra_targets_file = os.path.join(source_dir, "custom_targets.json")
if os.path.exists(extra_targets_file):
Target.__extra_target_json_files.append(extra_targets_file)
CACHES.clear()
@staticmethod
def set_targets_json_location(location=None):
"""Set the location of the targets.json file"""
Target.__targets_json_location = (location or
Target.__targets_json_location_default)
Target.__extra_target_json_files = []
# Invalidate caches, since the location of the JSON file changed
CACHES.clear()
@staticmethod
@cached
def get_module_data():
"""Get the members of this module using Python's "inspect" module"""
return dict([(m[0], m[1]) for m in
inspect.getmembers(sys.modules[__name__])])
@staticmethod
def __add_paths_to_progen(data):
"""Modify the exporter specification ("progen") by changing all
"template" keys to full paths
"""
out = {}
for key, val in data.items():
if isinstance(val, dict):
out[key] = Target.__add_paths_to_progen(val)
elif key == "template":
out[key] = [os.path.join(os.path.dirname(__file__), 'export', v)
for v in val]
else:
out[key] = val
return out
def __getattr_cumulative(self, attrname):
"""Look for the attribute in the class and its parents, as defined by
the resolution order
"""
tdata = self.json_data
# For a cumulative attribute, figure out when it was defined the
# last time (in attribute resolution order) then follow the "_add"
# and "_remove" data fields
for idx, tgt in enumerate(self.resolution_order):
# the attribute was defined at this level in the resolution
# order
if attrname in tdata[tgt[0]]:
def_idx = idx
break
else:
raise AttributeError("Attribute '%s' not found in target '%s'"
% (attrname, self.name))
# Get the starting value of the attribute
starting_value = (tdata[self.resolution_order[def_idx][0]][attrname]
or [])[:]
# Traverse the resolution list in high inheritance to low
# inheritance level, left to right order to figure out all the
# other classes that change the definition by adding or removing
# elements
for idx in xrange(self.resolution_order[def_idx][1] - 1, -1, -1):
same_level_targets = [tar[0] for tar in self.resolution_order
if tar[1] == idx]
for tar in same_level_targets:
data = tdata[tar]
# Do we have anything to add ?
if data.has_key(attrname + "_add"):
starting_value.extend(data[attrname + "_add"])
# Do we have anything to remove ?
if data.has_key(attrname + "_remove"):
# Macros can be defined either without a value (MACRO)
# or with a value (MACRO=10). When removing, we specify
# only the name of the macro, without the value. So we
# need to create a mapping between the macro name and
# its value. This will work for extra_labels and other
# type of arrays as well, since they fall into the
# "macros without a value" category (simple definitions
# without a value).
name_def_map = {}
for crtv in starting_value:
if crtv.find('=') != -1:
temp = crtv.split('=')
if len(temp) != 2:
raise ValueError(
"Invalid macro definition '%s'" % crtv)
name_def_map[temp[0]] = crtv
else:
name_def_map[crtv] = crtv
for element in data[attrname + "_remove"]:
if element not in name_def_map:
raise ValueError(
("Unable to remove '%s' in '%s.%s' since "
% (element, self.name, attrname)) +
"it doesn't exist")
starting_value.remove(name_def_map[element])
return starting_value
def __getattr_helper(self, attrname):
"""Compute the value of a given target attribute"""
if attrname in CUMULATIVE_ATTRIBUTES:
return self.__getattr_cumulative(attrname)
else:
tdata = self.json_data
starting_value = None
for tgt in self.resolution_order:
data = tdata[tgt[0]]
if data.has_key(attrname):
starting_value = data[attrname]
break
else: # Attribute not found
raise AttributeError(
"Attribute '%s' not found in target '%s'"
% (attrname, self.name))
# 'progen' needs the full path to the template (the path in JSON is
# relative to tools/export)
if attrname == "progen":
return self.__add_paths_to_progen(starting_value)
else:
return starting_value
def __getattr__(self, attrname):
""" Return the value of an attribute. This function only computes the
attribute's value once, then adds it to the instance attributes (in
__dict__), so the next time it is returned directly
"""
result = self.__getattr_helper(attrname)
self.__dict__[attrname] = result
return result
@staticmethod
@cached
def get_target(target_name):
""" Return the target instance starting from the target name """
return target(target_name, Target.get_json_target_data())
@property
def program_cycle_s(self):
"""Special override for program_cycle_s as it's default value depends
upon is_disk_virtual
"""
try:
return self.__getattr__("program_cycle_s")
except AttributeError:
return 4 if self.is_disk_virtual else 1.5
@property
def labels(self):
"""Get all possible labels for this target"""
names = copy(self.resolution_order_names)
if "Target" in names:
names.remove("Target")
labels = (names + CORE_LABELS[self.core] + self.extra_labels)
# Automatically define UVISOR_UNSUPPORTED if the target doesn't
# specifically define UVISOR_SUPPORTED
if "UVISOR_SUPPORTED" not in labels:
labels.append("UVISOR_UNSUPPORTED")
return labels
def init_hooks(self, hook, toolchain):
"""Initialize the post-build hooks for a toolchain. For now, this
function only allows "post binary" hooks (hooks that are executed
after the binary image is extracted from the executable file)
Positional Arguments:
hook - the hook object to add post-binary-hooks to
toolchain - the toolchain object for inspection
"""
# If there's no hook, simply return
try:
hook_data = self.post_binary_hook
except AttributeError:
return
# A hook was found. The hook's name is in the format
# "classname.functionname"
temp = hook_data["function"].split(".")
if len(temp) != 2:
raise HookError(
("Invalid format for hook '%s' in target '%s'"
% (hook_data["function"], self.name)) +
" (must be 'class_name.function_name')")
class_name, function_name = temp
# "class_name" must refer to a class in this file, so check if the
# class exists
mdata = self.get_module_data()
if not mdata.has_key(class_name) or \
not inspect.isclass(mdata[class_name]):
raise HookError(
("Class '%s' required by '%s' in target '%s'"
% (class_name, hook_data["function"], self.name)) +
" not found in targets.py")
# "function_name" must refer to a static function inside class
# "class_name"
cls = mdata[class_name]
if (not hasattr(cls, function_name)) or \
(not inspect.isfunction(getattr(cls, function_name))):
raise HookError(
("Static function '%s' " % function_name) +
("required by '%s' " % hook_data["function"]) +
("in target '%s' " % self.name) +
("not found in class '%s'" % class_name))
# Check if the hook specification also has toolchain restrictions
toolchain_restrictions = set(hook_data.get("toolchains", []))
toolchain_labels = set(c.__name__ for c in getmro(toolchain.__class__))
if toolchain_restrictions and \
not toolchain_labels.intersection(toolchain_restrictions):
return
# Finally, hook the requested function
hook.hook_add_binary("post", getattr(cls, function_name))
################################################################################
# Target specific code goes in this section
# This code can be invoked from the target description using the
# "post_binary_hook" key
class LPCTargetCode(object):
"""General LPC Target patching code"""
@staticmethod
def lpc_patch(t_self, resources, elf, binf):
"""Patch an elf file"""
t_self.debug("LPC Patch: %s" % os.path.split(binf)[1])
patch(binf)
class LPC4088Code(object):
"""Code specific to the LPC4088"""
@staticmethod
def binary_hook(t_self, resources, elf, binf):
"""Hook to be run after an elf file is built"""
if not os.path.isdir(binf):
# Regular binary file, nothing to do
LPCTargetCode.lpc_patch(t_self, resources, elf, binf)
return
outbin = open(binf + ".temp", "wb")
partf = open(os.path.join(binf, "ER_IROM1"), "rb")
        # Pad the first part (internal flash) with 0xFF to 512k
data = partf.read()
outbin.write(data)
outbin.write('\xFF' * (512*1024 - len(data)))
partf.close()
# Read and append the second part (external flash) in chunks of fixed
# size
chunksize = 128 * 1024
partf = open(os.path.join(binf, "ER_IROM2"), "rb")
while True:
data = partf.read(chunksize)
outbin.write(data)
if len(data) < chunksize:
break
partf.close()
outbin.close()
# Remove the directory with the binary parts and rename the temporary
# file to 'binf'
shutil.rmtree(binf, True)
os.rename(binf + '.temp', binf)
t_self.debug("Generated custom binary file (internal flash + SPIFI)")
LPCTargetCode.lpc_patch(t_self, resources, elf, binf)
class TEENSY3_1Code(object):
"""Hooks for the TEENSY3.1"""
@staticmethod
def binary_hook(t_self, resources, elf, binf):
"""Hook that is run after elf is generated"""
# This function is referenced by old versions of targets.json and should
# be kept for backwards compatibility.
pass
class MTSCode(object):
"""Generic MTS code"""
@staticmethod
def _combine_bins_helper(target_name, binf):
"""combine bins with the bootloader for a particular target"""
loader = os.path.join(TOOLS_BOOTLOADERS, target_name, "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
@staticmethod
def combine_bins_mts_dot(t_self, resources, elf, binf):
"""A hook for the MTS MDOT"""
MTSCode._combine_bins_helper("MTS_MDOT_F411RE", binf)
@staticmethod
def combine_bins_mts_dragonfly(t_self, resources, elf, binf):
"""A hoof for the MTS Dragonfly"""
MTSCode._combine_bins_helper("MTS_DRAGONFLY_F411RE", binf)
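# A small in-memory sketch (illustrative byte strings, not real images) of the
# layout produced by MTSCode._combine_bins_helper above: the bootloader is
# padded with 0xFF to a fixed 64 KiB offset, the application image follows,
# and a little-endian CRC32 of everything so far is appended.
def _mts_image_layout_sketch():
    bootloader = b'\x01\x02\x03'
    application = b'\xAA\xBB'
    image = bootloader + b'\xFF' * (64 * 1024 - len(bootloader)) + application
    crc = struct.pack('<I', binascii.crc32(image) & 0xFFFFFFFF)
    image += crc
    assert len(image) == 64 * 1024 + len(application) + 4
    return image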
class MCU_NRF51Code(object):
"""NRF51 Hooks"""
@staticmethod
def binary_hook(t_self, resources, _, binf):
"""Hook that merges the soft device with the bin file"""
# Scan to find the actual paths of soft device
sdf = None
for softdevice_and_offset_entry\
in t_self.target.EXPECTED_SOFTDEVICES_WITH_OFFSETS:
for hexf in resources.hex_files:
if hexf.find(softdevice_and_offset_entry['name']) != -1:
t_self.debug("SoftDevice file found %s."
% softdevice_and_offset_entry['name'])
sdf = hexf
if sdf is not None:
break
if sdf is not None:
break
if sdf is None:
t_self.debug("Hex file not found. Aborting.")
return
# Look for bootloader file that matches this soft device or bootloader
# override image
blf = None
if t_self.target.MERGE_BOOTLOADER is True:
for hexf in resources.hex_files:
if hexf.find(t_self.target.OVERRIDE_BOOTLOADER_FILENAME) != -1:
t_self.debug("Bootloader file found %s."
% t_self.target.OVERRIDE_BOOTLOADER_FILENAME)
blf = hexf
break
elif hexf.find(softdevice_and_offset_entry['boot']) != -1:
t_self.debug("Bootloader file found %s."
% softdevice_and_offset_entry['boot'])
blf = hexf
break
# Merge user code with softdevice
from intelhex import IntelHex
binh = IntelHex()
_, ext = os.path.splitext(binf)
if ext == ".hex":
binh.loadhex(binf)
elif ext == ".bin":
binh.loadbin(binf, softdevice_and_offset_entry['offset'])
if t_self.target.MERGE_SOFT_DEVICE is True:
t_self.debug("Merge SoftDevice file %s"
% softdevice_and_offset_entry['name'])
print("Merging Softdevice - %s" % softdevice_and_offset_entry['name'])
sdh = IntelHex(sdf)
binh.merge(sdh)
if t_self.target.MERGE_BOOTLOADER is True and blf is not None:
t_self.debug("Merge BootLoader file %s" % blf)
blh = IntelHex(blf)
binh.merge(blh)
with open(binf.replace(".bin", ".hex"), "w") as fileout:
binh.write_hex_file(fileout, write_start_addr=False)
class NCS36510TargetCode:
@staticmethod
def ncs36510_addfib(t_self, resources, elf, binf):
from tools.targets.NCS import add_fib_at_start
print("binf ", binf)
add_fib_at_start(binf[:-4])
class RTL8195ACode:
"""RTL8195A Hooks"""
@staticmethod
def binary_hook(t_self, resources, elf, binf):
from tools.targets.REALTEK_RTL8195AM import rtl8195a_elf2bin
rtl8195a_elf2bin(t_self, elf, binf)
################################################################################
# Instantiate all public targets
def update_target_data():
TARGETS[:] = [Target.get_target(tgt) for tgt, obj
in Target.get_json_target_data().items()
if obj.get("public", True)]
# Map each target name to its unique instance
TARGET_MAP.clear()
TARGET_MAP.update(dict([(tgt.name, tgt) for tgt in TARGETS]))
TARGET_NAMES[:] = TARGET_MAP.keys()
TARGETS = []
TARGET_MAP = dict()
TARGET_NAMES = []
update_target_data()
# Some targets with different name have the same exporters
EXPORT_MAP = {}
# Detection APIs
def get_target_detect_codes():
""" Returns dictionary mapping detect_code -> platform_name
"""
result = {}
for tgt in TARGETS:
for detect_code in tgt.detect_code:
result[detect_code] = tgt.name
return result
def set_targets_json_location(location=None):
"""Sets the location of the JSON file that contains the targets"""
# First instruct Target about the new location
Target.set_targets_json_location(location)
# Then re-initialize TARGETS, TARGET_MAP and TARGET_NAMES. The
# re-initialization does not create new variables, it keeps the old ones
# instead. This ensures compatibility with code that does
# "from tools.targets import TARGET_NAMES"
update_target_data()
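# Illustrative sketch of why update_target_data() mutates TARGETS, TARGET_MAP
# and TARGET_NAMES in place instead of rebinding them: modules that did
# "from tools.targets import TARGET_NAMES" hold a reference to the original
# object, so only in-place updates are visible to them.
def _example_in_place_update():
    names = ["OLD_TARGET"]
    alias = names               # what an importing module would hold
    names[:] = ["NEW_TARGET"]   # in-place update: the alias sees the change
    assert alias == ["NEW_TARGET"]
    names = ["REBOUND"]         # rebinding: the alias keeps the old object
    assert alias == ["NEW_TARGET"]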
|
|
# -*- coding: utf-8 -*-
"""An extension of the objectfilter to provide plaso specific options."""
import datetime
import logging
from plaso.formatters import manager as formatters_manager
from plaso.formatters import mediator as formatters_mediator
# TODO: Changes this so it becomes an attribute instead of having backend
# load a front-end library.
from plaso.lib import errors
from plaso.lib import objectfilter
from plaso.lib import py2to3
from plaso.lib import timelib
from plaso.parsers import presets
class DictObject(object):
  # The class docstring below contains a backslash (escaping '_' for Sphinx),
  # which pylint would otherwise flag as an anomalous escape sequence.
  # pylint: disable=anomalous-backslash-in-string
"""A simple object representing a dict object.
  To filter against an object that is stored as a dictionary, the dict
  is converted into a simple object. Since keys can contain spaces
  and/or other symbols, they are stripped out so that filtering works
  as if this were any other object.
Example dict::
{'A value': 234,
'this (my) key_': 'value',
'random': True,
}
  This object would then allow access to ``object.thismykey``, which maps to
  the key 'this (my) key\_' inside the dict.
"""
def __init__(self, dict_object):
"""Initialize the object and build a secondary dict."""
# TODO: Move some of this code to a more value typed system.
self._dict_object = dict_object
self._dict_translated = {}
for key, value in dict_object.items():
self._dict_translated[self._StripKey(key)] = value
def _StripKey(self, key):
"""Return a stripped version of the dict key without symbols."""
try:
return str(key).lower().translate(None, ' (){}+_=-<>[]')
except UnicodeEncodeError:
pass
def __getattr__(self, attr):
"""Return back entries from the dictionary."""
if attr in self._dict_object:
return self._dict_object.get(attr)
# Special case of getting all the key/value pairs.
if attr == '__all__':
ret = []
for key, value in self._dict_translated.items():
ret.append(u'{}:{}'.format(key, value))
return u' '.join(ret)
test = self._StripKey(attr)
if test in self._dict_translated:
return self._dict_translated.get(test)
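# Illustrative sketch, not part of plaso: how DictObject exposes dict keys,
# with spaces and symbols stripped, as attributes that filters can reference.
# The sketch assumes Python 2, which is what the two-argument str.translate
# call in _StripKey above targets.
def _example_dict_object_usage():
  event_data = DictObject({'A value': 234, 'this (my) key_': 'value'})
  assert event_data.avalue == 234            # 'A value' -> 'avalue'
  assert event_data.thismykey == 'value'     # 'this (my) key_' -> 'thismykey'
  assert 'thismykey:value' in event_data.__all__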
class PlasoValueExpander(objectfilter.AttributeValueExpander):
"""An expander that gives values based on object attribute names."""
def __init__(self):
"""Initialize an attribute value expander."""
super(PlasoValueExpander, self).__init__()
def _GetMessage(self, event_object):
"""Returns a properly formatted message string.
Args:
      event_object: the event object (instance of EventObject).
Returns:
A formatted message string.
"""
# TODO: move this somewhere where the mediator can be instantiated once.
formatter_mediator = formatters_mediator.FormatterMediator()
result = u''
try:
result, _ = formatters_manager.FormattersManager.GetMessageStrings(
formatter_mediator, event_object)
except KeyError as exception:
logging.warning(u'Unable to correctly assemble event: {0:s}'.format(
exception))
return result
def _GetSources(self, event_object):
"""Returns properly formatted source strings.
Args:
      event_object: the event object (instance of EventObject).
    Returns:
      A tuple containing the short and long source strings.
    """
    source_short = u''
    source_long = u''
    try:
      source_short, source_long = (
          formatters_manager.FormattersManager.GetSourceStrings(event_object))
    except KeyError as exception:
      logging.warning(u'Unable to correctly assemble event: {0:s}'.format(
          exception))
    return source_short, source_long
def _GetValue(self, obj, attr_name):
ret = getattr(obj, attr_name, None)
if ret:
if isinstance(ret, dict):
ret = DictObject(ret)
if attr_name == 'tag':
return ret.labels
return ret
# Check if this is a message request and we have a regular EventObject.
if attr_name == 'message':
return self._GetMessage(obj)
# Check if this is a source_short request.
if attr_name in ('source', 'source_short'):
source_short, _ = self._GetSources(obj)
return source_short
# Check if this is a source_long request.
if attr_name in ('source_long', 'sourcetype'):
_, source_long = self._GetSources(obj)
return source_long
def _GetAttributeName(self, path):
return path[0].lower()
class PlasoExpression(objectfilter.BasicExpression):
"""A Plaso specific expression."""
# A simple dictionary used to swap attributes so other names can be used
# to reference some core attributes (implementation specific).
swap_source = {
'date': 'timestamp',
'datetime': 'timestamp',
'time': 'timestamp',
'description_long': 'message',
'description': 'message',
'description_short': 'message_short',
}
def Compile(self, filter_implementation):
"""Compiles the filter implementation.
Args:
filter_implementation: a filter object (instance of objectfilter.TODO).
Returns:
A filter operator (instance of TODO).
Raises:
ParserError: if an unknown operator is provided.
"""
self.attribute = self.swap_source.get(self.attribute, self.attribute)
arguments = [self.attribute]
op_str = self.operator.lower()
operator = filter_implementation.OPS.get(op_str, None)
if not operator:
raise errors.ParseError(u'Unknown operator {0:s} provided.'.format(
self.operator))
# Plaso specific implementation - if we are comparing a timestamp
# to a value, we use our specific implementation that compares
# timestamps in a "human readable" format.
if self.attribute == 'timestamp':
args = []
for arg in self.args:
args.append(DateCompareObject(arg))
self.args = args
for arg in self.args:
if isinstance(arg, DateCompareObject):
if 'Less' in str(operator):
TimeRangeCache.SetUpperTimestamp(arg.data)
else:
TimeRangeCache.SetLowerTimestamp(arg.data)
arguments.extend(self.args)
expander = filter_implementation.FILTERS['ValueExpander']
ops = operator(arguments=arguments, value_expander=expander)
if not self.bool_value:
if hasattr(ops, 'FlipBool'):
ops.FlipBool()
return ops
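# Illustrative sketch, not part of plaso: the rewrite PlasoExpression.Compile
# applies to an expression such as "date < '2015-06-25'" before handing it to
# an operator: the attribute alias is swapped for the canonical name and
# string arguments are wrapped so that they compare as plaso timestamps.
def _example_expression_rewrite():
  attribute, args = 'date', ['2015-06-25']
  attribute = PlasoExpression.swap_source.get(attribute, attribute)
  assert attribute == 'timestamp'
  # Each wrapped argument exposes plaso's native microsecond timestamp.
  wrapped_args = [DateCompareObject(arg) for arg in args]
  return attribute, wrapped_args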
class ParserList(objectfilter.GenericBinaryOperator):
"""Matches when a parser is inside a predefined list of parsers."""
def __init__(self, *children, **kwargs):
"""Construct the parser list and retrieve a list of available parsers."""
super(ParserList, self).__init__(*children, **kwargs)
self.compiled_list = presets.CATEGORIES.get(
self.right_operand.lower(), [])
def Operation(self, x, unused_y):
"""Return a bool depending on the parser list contains the parser."""
if self.left_operand != 'parser':
raise objectfilter.MalformedQueryError(
u'Unable to use keyword "inlist" for other than parser.')
if x in self.compiled_list:
return True
return False
class PlasoAttributeFilterImplementation(objectfilter.BaseFilterImplementation):
"""Does field name access on the lowercase version of names.
Useful to only access attributes and properties with Google's python naming
style.
"""
FILTERS = {}
FILTERS.update(objectfilter.BaseFilterImplementation.FILTERS)
FILTERS.update({'ValueExpander': PlasoValueExpander})
OPS = objectfilter.OP2FN
OPS.update({'inlist': ParserList,})
class DateCompareObject(object):
"""A specific class created for date comparison.
  This object takes a date representation, whether that is a direct integer
  timestamp or a string representing the date, and uses it for comparison
  against timestamps stored in microseconds since Jan 1, 1970 00:00:00 UTC.
  This makes it possible to use regular comparison operators on dates
  regardless of the format they come in: since plaso stores all timestamps
  in the same integer format, it is simply a matter of converting the input
  to that format (int) and comparing the values.
"""
def __init__(self, data):
"""Take a date object and use that for comparison.
Args:
data: A string, datetime object or an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
Raises:
ValueError: if the date string is invalid.
"""
if isinstance(data, py2to3.INTEGER_TYPES):
self.data = data
self.text = u'{0:d}'.format(data)
elif isinstance(data, float):
self.data = py2to3.LONG_TYPE(data)
self.text = u'{0:f}'.format(data)
elif isinstance(data, py2to3.STRING_TYPES):
if isinstance(data, py2to3.BYTES_TYPE):
self.text = data.decode(u'utf-8', errors=u'ignore')
else:
self.text = data
try:
self.data = timelib.Timestamp.FromTimeString(self.text)
except (ValueError, errors.TimestampError):
raise ValueError(u'Wrongly formatted date string: {0:s}'.format(
self.text))
elif isinstance(data, datetime.datetime):
self.data = timelib.Timestamp.FromPythonDatetime(data)
self.text = u'{0!s}'.format(data)
elif isinstance(data, DateCompareObject):
self.data = data.data
self.text = u'{0!s}'.format(data)
else:
      raise ValueError(u'Unsupported type: {0!s}.'.format(type(data)))
def __cmp__(self, x):
"""A simple comparison operation."""
try:
x_date = DateCompareObject(x)
return cmp(self.data, x_date.data)
except ValueError:
return False
def __le__(self, x):
"""Less or equal comparison."""
return self.data <= x
def __ge__(self, x):
"""Greater or equal comparison."""
return self.data >= x
def __eq__(self, x):
"""Check if equal."""
return x == self.data
def __ne__(self, x):
"""Check if not equal."""
return x != self.data
def __str__(self):
"""Return a string representation of the object."""
return self.text
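# Illustrative sketch, not part of plaso: DateCompareObject normalizes any
# supported date representation to microseconds since 1970-01-01 00:00:00 UTC,
# so heterogeneous values can be compared with the regular operators.
def _example_date_compare():
  date_object = DateCompareObject(datetime.datetime(2015, 6, 25))
  assert date_object == date_object.data      # compares microsecond integers
  assert date_object <= date_object.data + 1
  assert str(date_object) == '2015-06-25 00:00:00'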
class BaseParser(objectfilter.Parser):
"""Plaso version of the Parser."""
expression_cls = PlasoExpression
class TimeRangeCache(object):
"""A class that stores timeranges from filters."""
MAX_INT64 = 2**64-1
@classmethod
def SetLowerTimestamp(cls, timestamp):
"""Sets the lower bound timestamp."""
if not hasattr(cls, '_lower'):
cls._lower = timestamp
return
if timestamp < cls._lower:
cls._lower = timestamp
@classmethod
def SetUpperTimestamp(cls, timestamp):
"""Sets the upper bound timestamp."""
if not hasattr(cls, '_upper'):
cls._upper = timestamp
return
if timestamp > cls._upper:
cls._upper = timestamp
@classmethod
def GetTimeRange(cls):
"""Return the first and last timestamp of filter range."""
first = getattr(cls, '_lower', 0)
last = getattr(cls, '_upper', cls.MAX_INT64)
if first < last:
return first, last
else:
return last, first
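# Illustrative sketch, not part of plaso: TimeRangeCache keeps the widest
# bounds seen while timestamp filters are compiled, and GetTimeRange() always
# returns them ordered as (first, last).
def _example_time_range_cache():
  TimeRangeCache.SetLowerTimestamp(2000)
  TimeRangeCache.SetLowerTimestamp(1000)   # the lower bound only decreases
  TimeRangeCache.SetUpperTimestamp(5000)   # the upper bound only increases
  assert TimeRangeCache.GetTimeRange() == (1000, 5000)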
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Precision--recall curves and TensorFlow operations to create them.
NOTE: This module is in beta, and its API is subject to change, but the
data that it stores to disk will be supported forever.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorboard.plugins.pr_curve import metadata
# A value that we use as the minimum value during division of counts to prevent
# division by 0. 1.0 does not work: Certain weights could cause counts below 1.
_MINIMUM_COUNT = 1e-7
# The default number of thresholds.
_DEFAULT_NUM_THRESHOLDS = 201
def op(
name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None,
collections=None):
"""Create a PR curve summary op for a single binary classifier.
  Computes true/false positive/negative values for the given `predictions`
  with respect to the ground truth `labels`, for each of `num_thresholds`
  evenly distributed threshold values in `[0, 1]`.
  Each number in `predictions`, a float in `[0, 1]`, is compared with its
  corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn
  value at each threshold. Each count is then multiplied by `weights`, which
  can be used to reweight certain values or, more commonly, to mask values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values. A Tensor of `bool` values with arbitrary
shape.
predictions: A float32 `Tensor` whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
weights: Optional float32 `Tensor`. Individual counts are multiplied by this
value. This tensor must be either the same shape as or broadcastable to
the `labels` tensor.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
      `[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. The float32 tensor
produced by the summary operation is of dimension (6, num_thresholds). The
first dimension (of length 6) is of the order: true positives,
false positives, true negatives, false negatives, precision, recall.
"""
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
dtype = predictions.dtype
with tf.name_scope(name, values=[labels, predictions, weights]):
tf.assert_type(labels, tf.bool)
# We cast to float to ensure we have 0.0 or 1.0.
f_labels = tf.cast(labels, dtype)
# Ensure predictions are all in range [0.0, 1.0].
predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
# Get weighted true/false labels.
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Before we begin, flatten predictions.
predictions = tf.reshape(predictions, [-1])
# Shape the labels so they are broadcast-able for later multiplication.
true_labels = tf.reshape(true_labels, [-1, 1])
false_labels = tf.reshape(false_labels, [-1, 1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
    #   B(i) = Sum( (predictions == t), t_i <= t < t_{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
    #   bucket_index(p) = floor( p * (num_thresholds - 1) )
    # and then count the weighted predictions per bucket in a single pass
    # with tf.one_hot() and tf.reduce_sum(), as done below.
# Compute the bucket indices for each prediction value.
bucket_indices = tf.cast(
tf.floor(predictions * (num_thresholds - 1)), tf.int32)
# Bucket predictions.
tp_buckets = tf.reduce_sum(
tf.one_hot(bucket_indices, depth=num_thresholds) * true_labels,
axis=0)
fp_buckets = tf.reduce_sum(
tf.one_hot(bucket_indices, depth=num_thresholds) * false_labels,
axis=0)
# Set up the cumulative sums to compute the actual metrics.
tp = tf.cumsum(tp_buckets, reverse=True, name='tp')
fp = tf.cumsum(fp_buckets, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections)
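# Illustrative sketch, not part of the public API: the bucket-plus-reversed-
# cumsum trick used above, shown with plain numpy. With 5 thresholds
# [0.0, 0.25, 0.5, 0.75, 1.0], a prediction of 0.6 lands in bucket
# floor(0.6 * 4) = 2, and the reversed cumulative sum over the buckets yields
# TP(t) for every threshold in a single pass.
def _example_bucketed_true_positives():
  predictions = np.array([0.1, 0.6, 0.9])
  labels = np.array([1.0, 1.0, 1.0])       # all positives, unit weights
  num_thresholds = 5
  bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
  tp_buckets, _ = np.histogram(
      bucket_indices,
      bins=num_thresholds,
      range=(0, num_thresholds - 1),
      weights=labels)
  tp = np.cumsum(tp_buckets[::-1])[::-1]
  assert list(tp) == [3.0, 2.0, 2.0, 1.0, 0.0]
  return tp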
def pb(name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf.
  Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
labels: The ground truth values. A bool numpy array.
predictions: A float32 numpy array whose values are in the range `[0, 1]`.
Dimensions must match those of `labels`.
num_thresholds: Optional number of thresholds, evenly distributed in
`[0, 1]`, to compute PR metrics for. When provided, should be an int of
value at least 2. Defaults to 201.
weights: Optional float or float32 numpy array. Individual counts are
multiplied by this value. This tensor must be either the same shape as
or broadcastable to the `labels` numpy array.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
    description: Optional long-form description for this summary, as a `str`.
      Markdown is supported. Defaults to empty.
  Returns:
    A TensorFlow `Summary` protobuf containing the serialized PR curve data.
  """
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=float_labels * weights)
fp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=(1.0 - float_labels) * weights)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return raw_data_pb(name,
true_positive_counts=tp,
false_positive_counts=fp,
true_negative_counts=tn,
false_negative_counts=fn,
precision=precision,
recall=recall,
num_thresholds=num_thresholds,
display_name=display_name,
description=description)
def streaming_op(name,
labels,
predictions,
num_thresholds=None,
weights=None,
metrics_collections=None,
updates_collections=None,
display_name=None,
description=None):
"""Computes a precision-recall curve summary across batches of data.
This function is similar to op() above, but can be used to compute the PR
curve across multiple batches of labels and predictions, in the same style
as the metrics found in tf.metrics.
This function creates multiple local variables for storing true positives,
  true negatives, etc. accumulated over each batch of data, and uses these local
variables for computing the final PR curve summary. These variables can be
updated with the returned update_op.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
num_thresholds: The number of evenly spaced thresholds to generate for
computing the PR curve. Defaults to 201.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
pr_curve: A string `Tensor` containing a single value: the
serialized PR curve Tensor summary. The summary contains a
float32 `Tensor` of dimension (6, num_thresholds). The first
dimension (of length 6) is of the order: true positives, false
positives, true negatives, false negatives, precision, recall.
update_op: An operation that updates the summary with the latest data.
"""
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
thresholds = [i / float(num_thresholds - 1)
for i in range(num_thresholds)]
with tf.name_scope(name, values=[labels, predictions, weights]):
tp, update_tp = tf.metrics.true_positives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
fp, update_fp = tf.metrics.false_positives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
tn, update_tn = tf.metrics.true_negatives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
fn, update_fn = tf.metrics.false_negatives_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
def compute_summary(tp, fp, tn, fn, collections):
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections)
pr_curve = compute_summary(tp, fp, tn, fn, metrics_collections)
update_op = tf.group(update_tp, update_fp, update_tn, update_fn)
if updates_collections:
for collection in updates_collections:
tf.add_to_collection(collection, update_op)
return pr_curve, update_op
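# Illustrative usage sketch, assuming a TF1-style Session loop; the names and
# the summary directory are examples only. streaming_op returns a serialized
# summary tensor plus an update_op that accumulates counts across the batches
# produced by an input pipeline.
def _example_streaming_usage(labels, predictions, logdir='/tmp/pr_demo'):
  pr_curve, update_op = streaming_op(
      name='pr_curve_demo', labels=labels, predictions=predictions)
  writer = tf.summary.FileWriter(logdir)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # initializes tf.metrics state
    for _ in range(10):                         # one run per batch of data
      sess.run(update_op)
    writer.add_summary(sess.run(pr_curve), global_step=0)
  writer.close()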
def raw_data_op(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""Create an op that collects data for visualizing PR curves.
Unlike the op above, this one avoids computing precision, recall, and the
intermediate counts. Instead, it accepts those tensors as arguments and
relies on the caller to ensure that the calculations are correct (and the
counts yield the provided precision and recall values).
This op is useful when a caller seeks to compute precision and recall
differently but still use the PR curves plugin.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 tensor of true positive counts. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
false_positive_counts: A rank-1 tensor of false positive counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
true_negative_counts: A rank-1 tensor of true negative counts. Must contain
`num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
false_negative_counts: A rank-1 tensor of false negative counts. Must
contain `num_thresholds` elements and be castable to float32. Values
correspond to thresholds that increase from left to right (from 0 to 1).
precision: A rank-1 tensor of precision values. Must contain
`num_thresholds` elements and be castable to float32. Values correspond
to thresholds that increase from left to right (from 0 to 1).
recall: A rank-1 tensor of recall values. Must contain `num_thresholds`
elements and be castable to float32. Values correspond to thresholds
that increase from left to right (from 0 to 1).
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be `>= 2`. This value should be a
constant integer value, not a Tensor that stores an integer.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
      `[GraphKeys.SUMMARIES]`.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
with tf.name_scope(name, values=[
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
]):
return _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds,
display_name,
description,
collections)
def raw_data_pb(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None):
"""Create a PR curves summary protobuf from raw data values.
Args:
name: A tag attached to the summary. Used by TensorBoard for organization.
true_positive_counts: A rank-1 numpy array of true positive counts. Must
contain `num_thresholds` elements and be castable to float32.
false_positive_counts: A rank-1 numpy array of false positive counts. Must
contain `num_thresholds` elements and be castable to float32.
true_negative_counts: A rank-1 numpy array of true negative counts. Must
contain `num_thresholds` elements and be castable to float32.
false_negative_counts: A rank-1 numpy array of false negative counts. Must
contain `num_thresholds` elements and be castable to float32.
precision: A rank-1 numpy array of precision values. Must contain
`num_thresholds` elements and be castable to float32.
recall: A rank-1 numpy array of recall values. Must contain `num_thresholds`
elements and be castable to float32.
num_thresholds: Number of thresholds, evenly distributed in `[0, 1]`, to
compute PR metrics for. Should be an int `>= 2`.
display_name: Optional name for this summary in TensorBoard, as a `str`.
Defaults to `name`.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Returns:
A summary operation for use in a TensorFlow graph. See docs for the `op`
method for details on the float32 tensor produced by this summary.
"""
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or '',
num_thresholds=num_thresholds)
summary = tf.Summary()
data = np.stack(
(true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall))
tensor = tf.make_tensor_proto(np.float32(data), dtype=tf.float32)
summary.value.add(tag='%s/pr_curves' % name,
metadata=summary_metadata,
tensor=tensor)
return summary
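# Illustrative sketch: feeding precomputed counts to raw_data_pb. The counts
# below are made up for the example; the caller is responsible for keeping the
# precision and recall values consistent with the counts.
def _example_raw_data_pb():
  tp = np.array([4.0, 2.0, 0.0])
  fp = np.array([2.0, 1.0, 0.0])
  tn = fp[0] - fp   # all negatives minus false positives at each threshold
  fn = tp[0] - tp   # all positives minus true positives at each threshold
  precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
  recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
  return raw_data_pb(
      'pr_curve_demo',
      true_positive_counts=tp,
      false_positive_counts=fp,
      true_negative_counts=tn,
      false_negative_counts=fn,
      precision=precision,
      recall=recall,
      num_thresholds=3)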
def _create_tensor_summary(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
collections=None):
"""A private helper method for generating a tensor summary.
We use a helper method instead of having `op` directly call `raw_data_op`
to prevent the scope of `raw_data_op` from being embedded within `op`.
Arguments are the same as for raw_data_op.
Returns:
A tensor summary that collects data for PR curves.
"""
# Store the number of thresholds within the summary metadata because
# that value is constant for all pr curve summaries with the same tag.
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or '',
num_thresholds=num_thresholds)
# Store values within a tensor. We store them in the order:
# true positives, false positives, true negatives, false
# negatives, precision, and recall.
combined_data = tf.stack([
tf.cast(true_positive_counts, tf.float32),
tf.cast(false_positive_counts, tf.float32),
tf.cast(true_negative_counts, tf.float32),
tf.cast(false_negative_counts, tf.float32),
tf.cast(precision, tf.float32),
tf.cast(recall, tf.float32)])
return tf.summary.tensor_summary(
name='pr_curves',
tensor=combined_data,
collections=collections,
summary_metadata=summary_metadata)
|
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing Scripts for the Redial Dataset. Reads redial data unformatted
from https://redialdata.github.io/website/ and saves it as a JSONL object of
{"conversation": input, "response": output} for easy use as training data."""
import json
import re
import os
import numpy as np
from tqdm import tqdm
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("data_dir", "./data/redial",
"path to folder with redial data")
flags.DEFINE_boolean("extra_redial_stats", False, "print extra summaries")
RD_UNFORMATTED_FNAMES = {
"train": "rd-train-data.jsonl",
"test": "rd-test-data.jsonl"
}
RD_FORMATTED_FNAMES = {
"train": "rd-train-formatted.jsonl",
"test": "rd-test-formatted.jsonl"
}
def main(_):
"""Processes raw redial data in data_dir and saves the results."""
logging.info("--Loading Redial Dataset--")
train = read_jsonl(os.path.join(FLAGS.data_dir,
RD_UNFORMATTED_FNAMES["train"]))
test = read_jsonl(os.path.join(FLAGS.data_dir,
RD_UNFORMATTED_FNAMES["test"]))
logging.info("--Replacing Movie IDs--")
for dialogue in tqdm(train):
replace_ids(dialogue)
for dialogue in tqdm(test):
replace_ids(dialogue)
logging.info("-Formatting For Training--")
train_formatted = separate_responses(train)
test_formatted = separate_responses(test)
write_jsonl(os.path.join(FLAGS.data_dir, RD_FORMATTED_FNAMES["train"]),
train_formatted)
write_jsonl(os.path.join(FLAGS.data_dir, RD_FORMATTED_FNAMES["test"]),
test_formatted)
if FLAGS.extra_redial_stats:
length_summary(FLAGS.data_dir)
def length_summary(data_dir):
"""prints a five number summary of the lengths of redial input/outputs."""
print("--Loading Dataset For Summary--")
train_data = read_jsonl(os.path.join(data_dir,
RD_FORMATTED_FNAMES["train"]))
test_data = read_jsonl(os.path.join(data_dir,
RD_FORMATTED_FNAMES["validation"]))
def len_function(key):
return lambda x: len(x[key].split())
lengths = {
"train_inputs": list(map(len_function("conversation"), train_data)),
"train_targets": list(map(len_function("response"), train_data)),
"test_inputs": list(map(len_function("conversation"), test_data)),
"test_targets": list(map(len_function("response"), test_data))
}
for name, length_array in lengths.items():
logging.info(name.upper() + ": ")
quartile_summary(length_array)
# Helper Functions
def read_jsonl(filename):
"""Reads a jsonl file and returns as array of dicts."""
with tf.io.gfile.GFile(filename, 'r') as json_file:
json_list = list(json_file)
data = []
for json_str in tqdm(json_list):
data.append(json.loads(json_str))
return data
def write_jsonl(filename, arr):
"""Writes array to jsonl."""
with open(filename, 'w') as f:
for line in arr:
f.write(json.dumps(line) + "\n")
def replace_ids(dialogue):
"""Replaces movie ids in one redial dialogue with their corresponding movie
titles. Each movie is surrounded by '@' tokens to separate from the rest of
the dialogue. Done in-place on dialogue passed as argument"""
movie_titles = dialogue["movieMentions"]
for message in dialogue["messages"]:
text = message["text"]
replaced = []
for word in text.split():
if word[0] == "@" and re.sub('\\D', '', word) in movie_titles:
movie_id = re.sub('\\D', '', word)
replaced.append("@ " + movie_titles[movie_id] + " @")
else:
replaced.append(word)
message["text"] = " ".join(replaced)
def separate_responses(dataset):
"""Creates a dataset of {"previous conversation" : "recommender response"}
dictionaries for every response by the recommending party in every
conversation in the dataset. Turns are separated by either a [Assistant] or
[User] token which indicates if the next messages were said by a user or an
assistant"""
result = []
for dialogue in tqdm(dataset):
conversation = "" # the conversation history up until a turn
turn = "" # consecutive messages made by the same actor
prev_id = None
metadata = {
"user_movies": [],
"assistant_movies": []
}
    # The initiator and respondent fill out surveys labeling the movies
    # mentioned. This combines their answers to account for partial responses
    # and defaults to the initiator's answer in the case of an inconsistency.
combined_responses = {**dict(dialogue["respondentQuestions"]),
**dict(dialogue["initiatorQuestions"])}
if dialogue["movieMentions"] != []:
for movie_id, title in dialogue["movieMentions"].items():
# if the movie is not labeled, default to "assistant_movies"
if title is None:
title = ""
if movie_id not in combined_responses or \
combined_responses[movie_id]["suggested"]:
metadata["assistant_movies"].append(title.lower())
else:
metadata["user_movies"].append(title.lower())
    # Adding a dummy message from the initiator ensures we iterate through
    # to the end of the array
empty_message = {"senderWorkerId": dialogue["initiatorWorkerId"],
"text": ""}
dialogue["messages"].append(empty_message)
for message in dialogue["messages"]:
if (prev_id is not None) and message["senderWorkerId"] != prev_id:
if message["senderWorkerId"] == dialogue["initiatorWorkerId"]:
# if the turn has switched to the user, add the
# (conversation, response) pair to response
result.append({"conversation": conversation.strip(),
"response": turn.strip(), "metadata": metadata})
conversation += " [Assistant] " + turn.strip()
else:
conversation += " [User] " + turn.strip()
turn = ""
prev_id = message["senderWorkerId"]
turn += message["text"] + " "
return result
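# Illustrative sketch, not part of the preprocessing pipeline: a minimal
# made-up dialogue and the (conversation, response) pair that
# separate_responses extracts from it. Only the fields the function reads
# are included.
def _example_separate_responses():
  dialogue = {
      "initiatorWorkerId": 1,
      "respondentQuestions": {},
      "initiatorQuestions": {},
      "movieMentions": {},
      "messages": [
          {"senderWorkerId": 1, "text": "hi, any movie ideas?"},
          {"senderWorkerId": 2, "text": "sure, something scary?"},
      ],
  }
  examples = separate_responses([dialogue])
  assert examples[0]["conversation"] == "[User] hi, any movie ideas?"
  assert examples[0]["response"] == "sure, something scary?"
  return examples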
def array_preview(name, arr):
"""Helper function for checking out arrays."""
if arr:
logging.info(name.upper())
logging.info("----------------")
logging.info("shape: " + str(np.shape(arr)))
logging.info("first element :")
logging.info(arr[0])
def conversation_preview(example):
conversation = example["conversation"] + " [Assistant] " + example["response"]
for message in re.split(r"\[([^]]+)\]", conversation):
print(message)
print("USER MENTIONED MOVIES: ")
print(example["metadata"]["user_movies"])
print("ASSISTANT MENTIONED MOVIES: ")
print(example["metadata"]["assistant_movies"])
def quartile_summary(arr):
"Prints the five number summary for a 1d array"
quartiles = np.percentile(arr, [25, 50, 75])
logging.info('MIN: {:d}'.format(min(arr)))
logging.info('Q1: {:d}'.format(int(quartiles[0])))
logging.info('MED: {:d}'.format(int(quartiles[1])))
logging.info('Q3: {:d}'.format(int(quartiles[2])))
logging.info('MAX: {:d}'.format(max(arr)))
if __name__ == "__main__":
app.run(main)
|
|
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest, HttpResponse
from django.test import TestCase, override_settings
from django.urls import reverse
from wagtail.core import hooks
from wagtail.core.compat import AUTH_USER_APP_LABEL, AUTH_USER_MODEL_NAME
from wagtail.core.models import Collection, GroupCollectionPermission, GroupPagePermission, Page
from wagtail.tests.utils import WagtailTestUtils
from wagtail.users.forms import UserCreationForm, UserEditForm
from wagtail.users.models import UserProfile
from wagtail.users.views.users import get_user_creation_form, get_user_edit_form
delete_user_perm_codename = "delete_{0}".format(AUTH_USER_MODEL_NAME.lower())
change_user_perm_codename = "change_{0}".format(AUTH_USER_MODEL_NAME.lower())
def test_avatar_provider(user, default, size=50):
return '/nonexistent/path/to/avatar.png'
class CustomUserCreationForm(UserCreationForm):
country = forms.CharField(required=True, label="Country")
attachment = forms.FileField(required=True, label="Attachment")
class CustomUserEditForm(UserEditForm):
country = forms.CharField(required=True, label="Country")
attachment = forms.FileField(required=True, label="Attachment")
class TestUserFormHelpers(TestCase):
def test_get_user_edit_form_with_default_form(self):
user_form = get_user_edit_form()
self.assertIs(user_form, UserEditForm)
def test_get_user_creation_form_with_default_form(self):
user_form = get_user_creation_form()
self.assertIs(user_form, UserCreationForm)
@override_settings(
WAGTAIL_USER_CREATION_FORM='wagtail.users.tests.CustomUserCreationForm'
)
def test_get_user_creation_form_with_custom_form(self):
user_form = get_user_creation_form()
self.assertIs(user_form, CustomUserCreationForm)
@override_settings(
WAGTAIL_USER_EDIT_FORM='wagtail.users.tests.CustomUserEditForm'
)
def test_get_user_edit_form_with_custom_form(self):
user_form = get_user_edit_form()
self.assertIs(user_form, CustomUserEditForm)
@override_settings(
WAGTAIL_USER_CREATION_FORM='wagtail.users.tests.CustomUserCreationFormDoesNotExist'
)
def test_get_user_creation_form_with_invalid_form(self):
self.assertRaises(ImproperlyConfigured, get_user_creation_form)
@override_settings(
WAGTAIL_USER_EDIT_FORM='wagtail.users.tests.CustomUserEditFormDoesNotExist'
)
def test_get_user_edit_form_with_invalid_form(self):
self.assertRaises(ImproperlyConfigured, get_user_edit_form)
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='password',
first_name='First Name',
last_name='Last Name'
)
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/wagtail/wagtail/issues/565
get_user_model().objects.create_user('guardian', '[email protected]', 'gu@rd14n', pk=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_search_query_one_field(self):
response = self.get({'q': "first name"})
self.assertEqual(response.status_code, 200)
results = response.context['users'].object_list
self.assertIn(self.test_user, results)
def test_search_query_multiple_fields(self):
response = self.get({'q': "first name last name"})
self.assertEqual(response.status_code, 200)
results = response.context['users'].object_list
self.assertIn(self.test_user, results)
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertContains(response, 'Password:')
self.assertContains(response, 'Password confirmation:')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, '[email protected]')
@override_settings(
WAGTAIL_USER_CREATION_FORM='wagtail.users.tests.CustomUserCreationForm',
WAGTAIL_USER_CUSTOM_FIELDS=['country', 'document'],
)
def test_create_with_custom_form(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
'country': "testcountry",
'attachment': SimpleUploadedFile('test.txt', b"Uploaded file"),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, '[email protected]')
self.assertEqual(users.first().country, 'testcountry')
self.assertEqual(users.first().attachment.read(), b"Uploaded file")
def test_create_with_password_mismatch(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "password1",
'password2': "password2",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
],
)
def test_create_with_password_validation(self):
"""
        Test that the Django password validators are run when creating a user.
        Specifically test that the UserAttributeSimilarityValidator works,
        which requires a fully populated user model before validation can run.
        """
# Create a user with a password the same as their name
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Example",
'last_name': "Name",
'password1': "example name",
'password2': "example name",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
# Password field should have an error
errors = response.context['form'].errors.as_data()
self.assertIn('password2', errors)
self.assertEqual(errors['password2'][0].code, 'password_too_similar')
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
def test_create_with_missing_password(self):
"""Password should be required by default"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "",
'password2': "",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password1'])
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_password_fields_exist_when_not_required(self):
"""Password fields should still be shown if WAGTAILUSERS_PASSWORD_REQUIRED is False"""
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertContains(response, 'Password:')
self.assertContains(response, 'Password confirmation:')
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_create_with_password_not_required(self):
"""Password should not be required if WAGTAILUSERS_PASSWORD_REQUIRED is False"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "",
'password2': "",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, '[email protected]')
self.assertEqual(users.first().password, '')
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_optional_password_is_still_validated(self):
"""When WAGTAILUSERS_PASSWORD_REQUIRED is False, password validation should still apply if a password _is_ supplied"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "banana",
'password2': "kumquat",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
@override_settings(WAGTAILUSERS_PASSWORD_REQUIRED=False)
def test_password_still_accepted_when_optional(self):
"""When WAGTAILUSERS_PASSWORD_REQUIRED is False, we should still allow a password to be set"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "banana",
'password2': "banana",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, '[email protected]')
self.assertTrue(users.first().check_password('banana'))
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_not_shown_when_disabled(self):
"""WAGTAILUSERS_PASSWORD_ENABLED=False should cause password fields to be removed"""
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertNotContains(response, 'Password:')
self.assertNotContains(response, 'Password confirmation:')
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_ignored_when_disabled(self):
"""When WAGTAILUSERS_PASSWORD_ENABLED is False, users should always be created without a usable password"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Test",
'last_name': "User",
'password1': "banana", # not part of the form - should be ignored
'password2': "kumquat", # not part of the form - should be ignored
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, '[email protected]')
self.assertEqual(users.first().password, '')
def test_before_create_user_hook(self):
def hook_func(request):
self.assertIsInstance(request, HttpRequest)
return HttpResponse("Overridden!")
with self.register_hook('before_create_user', hook_func):
response = self.client.get(
reverse('wagtailusers_users:add')
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_create_user_hook_post(self):
def hook_func(request):
self.assertIsInstance(request, HttpRequest)
return HttpResponse("Overridden!")
with self.register_hook('before_create_user', hook_func):
post_data = {
'username': "testuser",
'email': "[email protected]",
'password1': 'password12',
'password2': 'password12',
'first_name': 'test',
'last_name': 'user',
}
response = self.client.post(
reverse('wagtailusers_users:add'),
post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_after_create_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertIsInstance(user, get_user_model())
return HttpResponse("Overridden!")
with self.register_hook('after_create_user', hook_func):
post_data = {
'username': "testuser",
'email': "[email protected]",
'password1': 'password12',
'password2': 'password12',
'first_name': 'test',
'last_name': 'user',
}
response = self.client.post(
reverse('wagtailusers_users:add'),
post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
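# Illustrative sketch (deliberately not registered here, so the tests above
# are unaffected): in a real project a hook like the hook_func helpers above
# would live in wagtail_hooks.py and be wired up with
# @hooks.register('before_create_user'). Returning an HttpResponse from the
# hook short-circuits the create view.
def _example_before_create_user_hook(request):
    if not request.user.is_superuser:
        return HttpResponse("User creation is restricted to superusers.",
                            status=403)
    return None  # fall through to the normal user creation view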
class TestUserDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='password'
)
# also create a superuser to delete
self.superuser = get_user_model().objects.create_superuser(
username='testsuperuser',
email='[email protected]',
password='password'
)
self.current_user = self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/confirm_delete.html')
def test_delete(self):
response = self.post()
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was deleted
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
def test_user_cannot_delete_self(self):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.current_user.pk,)))
# Should redirect to admin index (permission denied)
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check user was not deleted
self.assertTrue(get_user_model().objects.filter(pk=self.current_user.pk).exists())
def test_user_can_delete_other_superuser(self):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.superuser.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/confirm_delete.html')
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.superuser.pk,)))
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was deleted
users = get_user_model().objects.filter(username='testsuperuser')
self.assertEqual(users.count(), 0)
def test_before_delete_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_delete_user', hook_func):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_delete_user_hook_post(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_delete_user', hook_func):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_after_delete_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.username, self.test_user.username)
return HttpResponse("Overridden!")
with self.register_hook('after_delete_user', hook_func):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
class TestUserDeleteViewForNonSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='password'
)
# create a user with delete permission
self.deleter_user = get_user_model().objects.create_user(
username='deleter',
email='[email protected]',
password='password'
)
deleters_group = Group.objects.create(name='User deleters')
deleters_group.permissions.add(Permission.objects.get(codename='access_admin'))
deleters_group.permissions.add(Permission.objects.get(
content_type__app_label=AUTH_USER_APP_LABEL, codename=delete_user_perm_codename
))
self.deleter_user.groups.add(deleters_group)
self.superuser = self.create_test_user()
self.client.login(username='deleter', password='password')
def test_simple(self):
response = self.client.get(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/confirm_delete.html')
def test_delete(self):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.test_user.pk,)))
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was deleted
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
def test_user_cannot_delete_self(self):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.deleter_user.pk,)))
# Should redirect to admin index (permission denied)
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check user was not deleted
self.assertTrue(get_user_model().objects.filter(pk=self.deleter_user.pk).exists())
def test_user_cannot_delete_superuser(self):
response = self.client.post(reverse('wagtailusers_users:delete', args=(self.superuser.pk,)))
# Should redirect to admin index (permission denied)
self.assertRedirects(response, reverse('wagtailadmin_home'))
# Check user was not deleted
self.assertTrue(get_user_model().objects.filter(pk=self.superuser.pk).exists())
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
first_name='Original',
last_name='User',
password='password'
)
# Login
self.current_user = self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.pk, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.pk, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
self.assertContains(response, 'Password:')
self.assertContains(response, 'Password confirmation:')
def test_nonexistant_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_simple_post(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "newpassword",
'password2': "newpassword",
'is_active': 'on'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertTrue(user.check_password('newpassword'))
def test_password_optional(self):
"""Leaving password fields blank should leave it unchanged"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "",
'password2': "",
'is_active': 'on'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited but password is unchanged
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertTrue(user.check_password('password'))
def test_passwords_match(self):
"""Password fields should be validated if supplied"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "banana",
'password2': "kumquat",
'is_active': 'on'
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Original')
self.assertTrue(user.check_password('password'))
@override_settings(
AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
],
)
def test_edit_with_password_validation(self):
"""
Test that the Django password validators are run when editing a user.
Specifically test that the UserAttributeSimilarityValidator works,
which requires a fully populated user model before the validation works.
"""
# Create a user with a password the same as their name
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "Name",
'password1': "edited name",
'password2': "edited name",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
# Password field should have an error
errors = response.context['form'].errors.as_data()
self.assertIn('password2', errors)
self.assertEqual(errors['password2'][0].code, 'password_too_similar')
# Check that the user was not edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Original')
self.assertTrue(user.check_password('password'))
def test_edit_and_deactivate(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
# Leaving out these fields, thus setting them to False:
# 'is_active': 'on'
# 'is_superuser': 'on',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
# Check that the user is no longer superuser
self.assertEqual(user.is_superuser, False)
# Check that the user is no longer active
self.assertEqual(user.is_active, False)
def test_edit_and_make_superuser(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
'is_active': 'on',
'is_superuser': 'on',
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
# Check that the user is now superuser
self.assertEqual(user.is_superuser, True)
# Check that the user is now active
self.assertEqual(user.is_active, True)
def test_edit_self(self):
response = self.post({
'username': '[email protected]',
'email': '[email protected]',
'first_name': "Edited Myself",
'last_name': "User",
# 'password1': "password",
# 'password2': "password",
'is_active': 'on',
'is_superuser': 'on',
}, self.current_user.pk)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.current_user.pk)
self.assertEqual(user.first_name, 'Edited Myself')
# Check that the user is still superuser
self.assertEqual(user.is_superuser, True)
# Check that the user is still active
self.assertEqual(user.is_active, True)
def test_editing_own_password_does_not_log_out(self):
response = self.post({
'username': '[email protected]',
'email': '[email protected]',
'first_name': "Edited Myself",
'last_name': "User",
'password1': "c0rrecth0rse",
'password2': "c0rrecth0rse",
'is_active': 'on',
'is_superuser': 'on',
}, self.current_user.pk)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.current_user.pk)
self.assertEqual(user.first_name, 'Edited Myself')
# Check user is not logged out
response = self.client.get(reverse('wagtailusers_users:index'))
self.assertEqual(response.status_code, 200)
def test_cannot_demote_self(self):
"""
check that unsetting a user's own is_active or is_superuser flag has no effect
"""
response = self.post({
'username': '[email protected]',
'email': '[email protected]',
'first_name': "Edited Myself",
'last_name': "User",
# 'password1': "password",
# 'password2': "password",
# failing to submit is_active or is_superuser would unset those flags,
# if we didn't explicitly prevent that when editing self
# 'is_active': 'on',
# 'is_superuser': 'on',
}, self.current_user.pk)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.current_user.pk)
self.assertEqual(user.first_name, 'Edited Myself')
# Check that the user is still superuser
self.assertEqual(user.is_superuser, True)
# Check that the user is still active
self.assertEqual(user.is_active, True)
@override_settings(
WAGTAIL_USER_EDIT_FORM='wagtail.users.tests.CustomUserEditForm',
)
def test_edit_with_custom_form(self):
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
'country': "testcountry",
'attachment': SimpleUploadedFile('test.txt', b"Uploaded file"),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertEqual(user.country, 'testcountry')
self.assertEqual(user.attachment.read(), b"Uploaded file")
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "[email protected]",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_not_shown_when_disabled(self):
"""WAGTAILUSERS_PASSWORD_ENABLED=False should cause password fields to be removed"""
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
self.assertNotContains(response, 'Password:')
self.assertNotContains(response, 'Password confirmation:')
@override_settings(WAGTAILUSERS_PASSWORD_ENABLED=False)
def test_password_fields_ignored_when_disabled(self):
"""When WAGTAILUSERS_PASSWORD_REQUIRED is False, existing password should be left unchanged"""
response = self.post({
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'is_active': 'on',
'password1': "banana", # not part of the form - should be ignored
'password2': "kumquat", # not part of the form - should be ignored
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited but password is unchanged
user = get_user_model().objects.get(pk=self.test_user.pk)
self.assertEqual(user.first_name, 'Edited')
self.assertTrue(user.check_password('password'))
def test_before_edit_user_hook(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_edit_user', hook_func):
response = self.client.get(reverse('wagtailusers_users:edit', args=(self.test_user.pk, )))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_edit_user_hook_post(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('before_edit_user', hook_func):
post_data = {
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
}
response = self.client.post(
reverse('wagtailusers_users:edit', args=(self.test_user.pk, )), post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_after_edit_user_hook_post(self):
def hook_func(request, user):
self.assertIsInstance(request, HttpRequest)
self.assertEqual(user.pk, self.test_user.pk)
return HttpResponse("Overridden!")
with self.register_hook('after_edit_user', hook_func):
post_data = {
'username': "testuser",
'email': "[email protected]",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
}
response = self.client.post(
reverse('wagtailusers_users:edit', args=(self.test_user.pk, )), post_data
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='password',
)
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.wagtail_userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
def test_avatar_empty_on_profile_creation(self):
user_profile = UserProfile.get_for_user(self.test_user)
self.assertFalse(user_profile.avatar)
class TestUserEditViewForNonSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
# create a user with edit permission
self.editor_user = get_user_model().objects.create_user(
username='editor',
email='[email protected]',
password='password'
)
editors_group = Group.objects.create(name='User editors')
editors_group.permissions.add(Permission.objects.get(codename='access_admin'))
editors_group.permissions.add(Permission.objects.get(
content_type__app_label=AUTH_USER_APP_LABEL, codename=change_user_perm_codename
))
self.editor_user.groups.add(editors_group)
self.client.login(username='editor', password='password')
def test_user_cannot_escalate_privileges(self):
"""
Check that a non-superuser cannot edit their own is_active or is_superuser flag.
(note: this doesn't necessarily guard against other routes to escalating privileges, such
as creating a new user with is_superuser=True or adding oneself to a group with additional
privileges - the latter will be dealt with by #537)
"""
editors_group = Group.objects.get(name='User editors')
post_data = {
'username': "editor",
'email': "[email protected]",
'first_name': "Escalating",
'last_name': "User",
'password1': "",
'password2': "",
'groups': [editors_group.id, ],
# These should not be possible without manipulating the form in the DOM:
'is_superuser': 'on',
'is_active': 'on',
}
response = self.client.post(
reverse('wagtailusers_users:edit', args=(self.editor_user.pk, )),
post_data)
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
user = get_user_model().objects.get(pk=self.editor_user.pk)
# check if user is still in the editors group
self.assertTrue(user.groups.filter(name='User editors').exists())
# check that non-permission-related edits went ahead
self.assertEqual(user.first_name, "Escalating")
# Check that the user did not escalate its is_superuser status
self.assertEqual(user.is_superuser, False)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['search_form']['q'].value(), "Hello")
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.add_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='add_document'
)
self.change_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='change_document'
)
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
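# post_defaults below supplies only the Django formset management-form
# fields (TOTAL_FORMS / INITIAL_FORMS / MAX_NUM_FORMS) for each permission
# formset; individual tests add real permission rows and bump TOTAL_FORMS
# as needed.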
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
'document_permissions-TOTAL_FORMS': ['0'],
'document_permissions-MAX_NUM_FORMS': ['1000'],
'document_permissions-INITIAL_FORMS': ['0'],
'image_permissions-TOTAL_FORMS': ['0'],
'image_permissions-MAX_NUM_FORMS': ['1000'],
'image_permissions-INITIAL_FORMS': ['0'],
}
for k, v in post_defaults.items():
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the user was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_types': ['edit', 'publish'],
'page_permissions-TOTAL_FORMS': ['1'],
'document_permissions-0-collection': [Collection.get_first_root_node().pk],
'document_permissions-0-permissions': [self.add_doc_permission.pk],
'document_permissions-TOTAL_FORMS': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
# and one 'add document' collection permission
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
self.assertEqual(
new_group.collection_permissions.filter(permission=self.add_doc_permission).count(),
1
)
def test_duplicate_page_permissions_error(self):
# Try to submit multiple page permission entries for the same page
response = self.post({
'name': "test group",
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_types': ['publish'],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_types': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# formset should have a non-form error about the duplication
self.assertTrue(response.context['permission_panels'][0].non_form_errors())
def test_duplicate_document_permissions_error(self):
# Try to submit multiple document permission entries for the same collection
root_collection = Collection.get_first_root_node()
response = self.post({
'name': "test group",
'document_permissions-0-collection': [root_collection.pk],
'document_permissions-0-permissions': [self.add_doc_permission.pk],
'document_permissions-1-collection': [root_collection.pk],
'document_permissions-1-permissions': [self.change_doc_permission.pk],
'document_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# formset should have a non-form error about the duplication
# (we don't know what index in permission_panels the formset will be,
# so just assert that it happens on at least one permission_panel)
self.assertTrue(
any(
hasattr(panel, 'non_form_errors') and panel.non_form_errors()
for panel in response.context['permission_panels']
)
)
def test_can_submit_blank_permission_form(self):
# the formsets for page / collection permissions should gracefully
# handle (and ignore) forms that have been left entirely blank
response = self.post({
'name': "test group",
'page_permissions-0-page': [''],
'page_permissions-TOTAL_FORMS': ['1'],
'document_permissions-0-collection': [''],
'document_permissions-TOTAL_FORMS': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with no page / document permissions
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 0)
self.assertEqual(
new_group.collection_permissions.filter(permission=self.add_doc_permission).count(),
0
)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(pk=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
self.home_page = Page.objects.get(pk=2)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# set up collections to test document permissions
self.root_collection = Collection.get_first_root_node()
self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
self.add_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='add_document'
)
self.change_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='change_document'
)
GroupCollectionPermission.objects.create(
group=self.test_group,
collection=self.evil_plans_collection,
permission=self.add_doc_permission,
)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.pk, )), params)
def post(self, post_data={}, group_id=None):
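# post_defaults mirrors the state created in setUp(): the formset
# management-form fields plus the existing root-page 'add' permission and
# the 'add document' permission on the Evil plans collection, so each test
# only overrides the keys it cares about.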
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.pk],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'],
'page_permissions-0-page': [self.root_page.pk],
'page_permissions-0-permission_types': ['add'],
'document_permissions-TOTAL_FORMS': ['1'],
'document_permissions-MAX_NUM_FORMS': ['1000'],
'document_permissions-INITIAL_FORMS': ['1'],
'document_permissions-0-collection': [self.evil_plans_collection.pk],
'document_permissions-0-permissions': [self.add_doc_permission.pk],
'image_permissions-TOTAL_FORMS': ['0'],
'image_permissions-MAX_NUM_FORMS': ['1000'],
'image_permissions-INITIAL_FORMS': ['0'],
}
for k, v in post_defaults.items():
post_data[k] = post_data.get(k, v)
return self.client.post(reverse(
'wagtailusers_groups:edit', args=(group_id or self.test_group.pk, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(pk__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
def test_nonexistant_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(pk=self.test_group.pk)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions_same_page(self):
# The test group has one page permission to begin with - 'add' permission on root.
# Add two additional permission types on the root page
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-permission_types': ['add', 'publish', 'edit'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_adding_document_permissions_same_collection(self):
# The test group has one document permission to begin with -
# 'add' permission on evil_plans.
# Add 'change' permission on evil_plans
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-0-permissions': [
self.add_doc_permission.pk, self.change_doc_permission.pk
],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has two document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
2
)
def test_group_edit_adding_document_permissions_different_collection(self):
# The test group has one document permission to begin with -
# 'add' permission on evil_plans.
# Add 'add' and 'change' permission on the root collection
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-TOTAL_FORMS': ['2'],
'document_permissions-1-collection': [self.root_collection.pk],
'document_permissions-1-permissions': [
self.add_doc_permission.pk, self.change_doc_permission.pk
],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
3
)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_deleting_document_permissions(self):
# The test group has one document permission to begin with
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
0
)
def test_group_edit_loads_with_django_permissions_shown(self):
# the checkbox for self.existing_permission should be ticked
response = self.get()
# use allow_extra_attrs because the input will also have an id (with an unpredictable value)
self.assertTagInHTML(
'<input name="permissions" type="checkbox" checked value="%s">' % self.existing_permission.id,
str(response.content),
allow_extra_attrs=True)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(
page_permissions_formset.management_form['INITIAL_FORMS'].value(),
1
)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.pk
)
self.assertEqual(
page_permissions_formset.forms[0]['permission_types'].value(),
['add']
)
# add edit permission on root
GroupPagePermission.objects.create(
page=self.root_page, permission_type='edit', group=self.test_group
)
# The test group now has two page permissions on root (but only one form covering both)
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(page_permissions_formset.management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(len(page_permissions_formset.forms), 1)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.pk
)
self.assertEqual(
set(page_permissions_formset.forms[0]['permission_types'].value()),
set(['add', 'edit'])
)
# add edit permission on home
GroupPagePermission.objects.create(
page=self.home_page, permission_type='edit', group=self.test_group
)
# The test group now has three page permissions, over two forms
self.assertEqual(self.test_group.page_permissions.count(), 3)
# Reload the page and check the form instances
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(page_permissions_formset.management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.pk
)
self.assertEqual(
set(page_permissions_formset.forms[0]['permission_types'].value()),
set(['add', 'edit'])
)
self.assertEqual(
page_permissions_formset.forms[1]['page'].value(),
self.home_page.pk
)
self.assertEqual(
page_permissions_formset.forms[1]['permission_types'].value(),
['edit']
)
def test_duplicate_page_permissions_error(self):
# Try to submit multiple page permission entries for the same page
response = self.post({
'page_permissions-1-page': [self.root_page.pk],
'page_permissions-1-permission_types': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the formset should have a non-form error
self.assertTrue(response.context['permission_panels'][0].non_form_errors())
def test_duplicate_document_permissions_error(self):
# Try to submit multiple document permission entries for the same collection
response = self.post({
'document_permissions-1-collection': [self.evil_plans_collection.pk],
'document_permissions-1-permissions': [self.change_doc_permission.pk],
'document_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the formset should have a non-form error
self.assertTrue(
any(
hasattr(panel, 'non_form_errors') and panel.non_form_errors()
for panel in response.context['permission_panels']
)
)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.pk, self.another_permission.pk]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
# submit the form with no changes (only submitting the existing
# permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.pk, self.another_permission.pk]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
|
|
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from random import choice, randint
import support as sp
from common import DEBUG, CONFIG
from player import Player as p
from pov_deck import POV_DECK_BYTES
TRIGGER_BYTES = ['\x21', '\x5f', '\x3c', '\x42', '\x63', '\x51']
# 'NAME': (IDX, BYTE)
TRIGGER_BYTE_LOOKUP = {
'DECK': (0, TRIGGER_BYTES[0]),
'DEAL': (1, TRIGGER_BYTES[1]),
'DATA': (2, TRIGGER_BYTES[2]),
'BALANCE': (3, TRIGGER_BYTES[3]),
'COUNT': (4, TRIGGER_BYTES[4]),
'QUIT': (5, TRIGGER_BYTES[5]),
}
def get_non_trigger_byte():
b = chr(randint(0, 255))
while b in TRIGGER_BYTES:
b = chr(randint(0,255))
return b
class Dealer(object):
def __init__(self, magic_page):
self.players = []
self.magic_page = magic_page
self.deck = []
self.ok = ''
def add_players(self, p_count):
for i in range(1, p_count + 1):
noob = p(i, self.magic_page)
self.players.append(noob)
self.balances = [0 for _ in range(p_count + 1)]
self.wagers = [0 for _ in range(p_count + 1)]
self.results = [0 for _ in range(p_count + 1)]
self.deal1 = [0 for _ in range(p_count + 1)]
self.deal2 = [0 for _ in range(p_count + 1)]
if DEBUG:
print "added {0} players".format(p_count)
def make_trigger_buf(self, cmd_name):
cmd = ''
for cn in ['DECK', 'DEAL', 'DATA', 'BALANCE', 'COUNT', 'QUIT']:
if cn == cmd_name:
(idx, b) = TRIGGER_BYTE_LOOKUP[cmd_name]
cmd += b
else:
cmd += get_non_trigger_byte()
return cmd
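# Illustrative example (not part of the original script): a 'DEAL' buffer
# built by make_trigger_buf() is 6 bytes long, with byte index 1 equal to
# '\x5f' and the other five positions filled with random non-trigger bytes,
# e.g. something like '\x07\x5f\xa1\x19\x88\x02'.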
def make_new_random_deck(self, count, bad_cards=False):
deck = [chr(randint(CONFIG['MIN_CARD_VALUE'], CONFIG['MAX_CARD_VALUE'])) for _ in range(count)]
if True == bad_cards:
deck[count/2] = chr(randint(CONFIG['MAX_CARD_VALUE'] + 1, 255))
deck[count-1] = chr(randint(CONFIG['MAX_CARD_VALUE'] + 1, 255))
if DEBUG:
print "making new deck with {0} cards with {1}bad cards".format(count, "" if bad_cards else "no ")
# if DEBUG:
# print "deck:"
# for c in deck:
# print " card 0x{0:02x},".format(ord(c))
return deck
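# gen_ok below folds the 4096-byte magic page down to an 8-byte check value
# by XOR-ing byte i of the page into slot i % 8; with the all-'A' magic page
# used in check_deck() further down, each slot XORs 512 copies of 0x41 and
# the result is eight zero bytes.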
def gen_ok(self):
ok = [0,0,0,0,0,0,0,0]
for idx in range(len(self.magic_page)):
ok[idx % 8] ^= ord(self.magic_page[idx])
self.ok = ''.join([chr(c) for c in ok])
def get_packed_balances(self):
p_bal = ''
if DEBUG:
print " balances were: {0}".format(self.balances)
for b in self.balances:
p_bal += sp.pack_single_int32(b)
return p_bal
def get_packed_wagers(self):
p_wag = ''
if DEBUG:
print " wagers were: {0}".format(self.wagers)
for w in self.wagers:
p_wag += sp.pack_single_uint32(w)
return p_wag
def get_packed_results(self):
p_res = ''
if DEBUG:
print " results were: {0}".format(self.results)
for r in self.results:
p_res += sp.pack_single_uint32(r)
return p_res
def enough_cards_for_round(self):
return 0 < len(self.deck) and len(self.deck) >= len(self.players)
def calculate_round_results(self):
max_sum = 0
if DEBUG:
print "calculating round results"
for idx in range(1, len(self.results)):
if max_sum < self.deal1[idx] + self.deal2[idx]:
max_sum = self.deal1[idx] + self.deal2[idx]
if DEBUG:
print " updated round max_sum {0}".format(max_sum)
for idx in range(1, len(self.results)):
if DEBUG:
print " calculating results for player {0}".format(idx)
my_sum = self.deal1[idx] + self.deal2[idx]
if DEBUG:
print " round sum {1}".format(idx, my_sum)
if my_sum == max_sum:
self.results[idx] = CONFIG['WIN']
if DEBUG:
" WIN".format(idx)
else:
self.results[idx] = CONFIG['LOSS']
if DEBUG:
" LOSS".format(idx)
def get_next_card(self):
c = self.deck.pop()
if DEBUG:
print "got next card 0x{0:02x}".format(ord(c))
return c
def play_one_round(self):
if DEBUG:
print "play one round with {0} players".format(len(self.players))
# deal card to each player
for p in self.players:
c = self.get_next_card()
self.deal1[p.id] = ord(c)
p.cards[0] = ord(c)
if DEBUG:
print " player {0} dealt card 0x{1:02x}".format(p.id, ord(c))
if DEBUG:
print " cards from deal 1: {0}".format(self.deal1)
# get wager from each player
for p in self.players:
self.wagers[p.id] = p.get_wager()
if DEBUG:
print " player {0} wagered {1}".format(p.id, self.wagers[p.id])
# deal card to each player
for p in self.players:
c = self.get_next_card()
self.deal2[p.id] = ord(c)
p.cards[1] = ord(c)
if DEBUG:
print " player {0} dealt card 0x{1:02x}".format(p.id, ord(c))
if DEBUG:
print " cards from deal 2: {0}".format(self.deal2)
self.calculate_round_results()
# pay each player
for p in self.players:
if CONFIG['WIN'] == self.results[p.id]:
p.exchange_money(self.wagers[p.id])
p.win()
self.balances[p.id] += self.wagers[p.id]
else:
p.exchange_money(-self.wagers[p.id])
p.loss()
self.balances[p.id] += -self.wagers[p.id]
## For testing ##
def total_magic_page_indices_used(self):
mpiu = set()
for p in self.players:
mpiu.update(p.magic_page_indices_used)
mpiu_l = list(mpiu)
mpiu_l.sort()
return mpiu_l
def check_magic_bytes_usage(self):
mpiu_list = self.total_magic_page_indices_used()
# print " total set of magic_page_indices_used: {0}".format(mpiu_list)
for idx in range(len(mpiu_list) - 3):
i0 = mpiu_list[idx]
i1 = mpiu_list[idx + 1]
i2 = mpiu_list[idx + 2]
i3 = mpiu_list[idx + 3]
if i0 == i1 - 1 and \
i1 == i2 - 1 and \
i2 == i3 - 1:
if DEBUG:
print "#### Warning 4 consecutive magic page indices: {0}, {1}, {2}, {3}.".format(i0, i1, i2, i3)
print "magic total_magic_page_indices_used: {0}".format(mpiu_list)
return [i0, i1, i2, i3]
if DEBUG:
print " Done checking for 4 consecutive magic page indices."
return []
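# Worked example of the check above (illustrative, not from the original):
# for mpiu_list == [3, 7, 8, 9, 10, 20] the window starting at index 1 is
# (7, 8, 9, 10), which satisfies i0 == i1 - 1, i1 == i2 - 1 and i2 == i3 - 1,
# so check_magic_bytes_usage() would return [7, 8, 9, 10].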
def check_deck(deck_sz=1000):
if DEBUG:
print "find_trigger_deck size {0}".format(deck_sz)
# setup
d = Dealer('A'*4096)
d.add_players(CONFIG['PLAYER_COUNT'])
# make deck
deck = d.make_new_random_deck(deck_sz)
d.deck = list(deck)
mpiu_list = []
while d.enough_cards_for_round():
# play a round
d.play_one_round()
# break if found 4 consecutive fp indices
mpiu_list = d.check_magic_bytes_usage()
if [] != mpiu_list:
if DEBUG:
print "indices {0}".format(mpiu_list)
return deck
return []
def find_trigger_deck():
deck_sz = 2500
deck = check_deck(deck_sz)
while [] == deck:
deck_sz += 10
deck = check_deck(deck_sz)
if DEBUG:
print "trigger deck len {0}, deck {1}".format(len(deck), deck)
return deck
def test_trigger_deck(deck):
if DEBUG:
print "test_trigger_deck"
# setup
d = Dealer('A'*4096)
d.add_players(CONFIG['PLAYER_COUNT'])
d.deck = list(deck)
mpiu_list = []
while d.enough_cards_for_round():
# play a round
d.play_one_round()
# break if found 4 consecutive fp indices
mpiu_list = d.check_magic_bytes_usage()
if [] != mpiu_list:
# if DEBUG:
print "trigger indices {0}".format(mpiu_list)
return True
return False
if __name__ == '__main__':
# deck = find_trigger_deck()
deck = POV_DECK_BYTES
print test_trigger_deck(deck)
|
|
#----------------------------------------------------------------------------
# Name: clroses.py
# Purpose: Class definitions for Roses interactive display programs.
#
# Author: Ric Werme
# WWW: http://WermeNH.com/roses
#
# Created: June 2007
# CVS-ID: $Id$
# Copyright: Public Domain, please give credit where credit is due.
# License: Sorry, no EULA.
#----------------------------------------------------------------------------
# This is yet another incarnation of an old graphics hack based around
# misdrawing an analytic geometry curve called a rose. The basic form is
# simply the polar coordinate function r = cos(a * theta). "a" is the
# "order" of the rose, a zero value degenerates to r = 1, a circle. While
# this program is happy to draw that, much more interesting things happen when
# one or more of the following is in effect:
# 1) The "delta theta" between points is large enough to distort the curve,
# e.g. 90 degrees will draw a square, slightly less will be interesting.
# 2) The order of the rose is too large to draw it accurately.
# 3) Vectors are drawn at less than full speed.
# 4) The program is stepping through different patterns on its own.
# While you will be able to predict some aspects of the generated patterns,
# a lot of what there is to be found is found at random!
# The rose class has all the knowledge to implement generating vector data for
# roses and handles all the timing issues. It does not have the user interface
# for changing all the drawing parameters. It offers a "vision" of what an
# ideal Roses program should be, however, callers are welcome to assert their
# independence, override defaults, ignore features, etc.
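# In Cartesian terms, each generated point is
#     x = cos(petals * theta) * cos(theta)
#     y = cos(petals * theta) * sin(theta)
# with theta advancing by style/nvec of a full circle between successive
# points; rose() and roselet() below implement exactly this through the
# sine/cosine lookup tables.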
from math import sin, cos, pi
from six import print_
# Rose class knows about:
# > Generating points and vectors (returning data as a list of points)
# > Starting a new rose (e.g. telling user to erase old vectors)
# > Stepping from one pattern to the next.
class rose:
"Defines everything needed for drawing a rose with timers."
# The following data is accessible by callers, but there are set
# methods for most everything and various method calls to client methods
# to display current values.
style = 100 # Angular distance along curve between points
sincr = -1 # Amount to increment style by in auto mode
petals = 2 # Lobes on the rose (even values have 2X lobes)
pincr = 1 # Amount to increment petals by in auto mode
nvec = 399 # Number of vectors to draw the rose
minvec = 0 # Minimum number acceptable in automatic mode
maxvec = 3600 # Maximum number acceptable in automatic mode
skipvec = 0 # Don't draw this many at the start (cheap animations)
drawvec = 3600 # Draw only this many (cheap animations)
step = 20 # Number of vectors to draw each clock tick
draw_delay = 50 # Time between roselet calls to watch pattern draw
wait_delay = 2000 # Time between roses in automatic mode
# Other variables that the application shouldn't access.
verbose = 0 # No good way to set this at the moment.
nextpt = 0 # Next position to draw on next clock tick
# Internal states:
INT_IDLE, INT_DRAW, INT_SEARCH, INT_WAIT, INT_RESIZE = range(5)
int_state = INT_IDLE
# Command states
CMD_STOP, CMD_GO = range(2)
cmd_state = CMD_STOP
# Return full rose line (a tuple of (x, y) tuples). Not used by interactive
# clients but still useful for command line and batch clients.
# This is the "purest" code and doesn't require the App* methods defined
# by the caller.
def rose(self, style, petals, vectors):
self.nvec = vectors
self.make_tables(vectors)
line = [(1.0, 0.0)]
for i in range (1, vectors):
theta = (style * i) % vectors
r = self.cos_table[(petals * theta) % vectors]
line.append((r * self.cos_table[theta], r * self.sin_table[theta]))
line.append((1.0, 0.0))
return line
# Generate vectors for the next chunk of rose.
# This is not meant to be called from an external module, as it is closely
# coupled to parameters set up within the class and limits set up by
# restart(). Restart() initializes all data this needs to start drawing a
# pattern, and clock() calls this to compute the next batch of points and
# hear if that is the last batch. We maintain all data we need to draw each
# batch after the first. theta should be 2.0*pi * style*i/self.nvec
# radians, but we deal in terms of the lookup table so it's just the index
# that refers to the same spot.
def roselet(self):
line = []
stop = self.nextpt + self.step
keep_running = True
if stop >= self.endpt:
stop = self.endpt
keep_running = False
for i in range (self.nextpt, int(stop + 1)):
theta = (self.style * i) % self.nvec
r = self.cos_table[(self.petals * theta) % self.nvec]
line.append((r * self.cos_table[theta], r * self.sin_table[theta]))
self.nextpt = stop
return line, keep_running
# Generate sine and cosine lookup tables. We could create data for just
# 1/4 of a circle, at least if vectors was a multiple of 4, and share a
# table for both sine and cosine, but memory is cheaper than it was in
# PDP-11 days. OTOH, small, shared tables would be more cache friendly,
# but if we were that concerned, this would be in C.
def make_tables(self, vectors):
self.sin_table = [sin(2.0 * pi * i / vectors) for i in range(vectors)]
self.cos_table = [cos(2.0 * pi * i / vectors) for i in range(vectors)]
# Rescale (x,y) data to match our window. Note the negative scaling in the
# Y direction; this compensates for Y moving down the screen, but up on
# graph paper.
def rescale(self, line, offset, scale):
for i in range(len(line)):
line[i] = (line[i][0] * scale + offset[0],
line[i][1] * (-scale) + offset[1])
return line
# Euclid's algorithm for computing the greatest common divisor. Knuth's
# "The Art of Computer Programming" vol.2 is the standard reference,
# but the web has several good ones too. Basically this sheds factors
# that aren't in the GCD and returns when there's nothing left to shed.
# N.B. Call with a >= b.
def gcd(self, a, b):
while b != 0:
a, b = b, a % b
return a
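# Worked example (not in the original source): with the class defaults of
# nvec = 399 and style = 100, gcd(399, 100) is 1, so restart() below computes
# takesvec = 399 / 1 = 399 vectors to close the curve; a style that shares a
# factor with nvec closes the pattern in proportionally fewer vectors.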
# Erase any old vectors and start drawing a new rose. When the program
# starts, the sine and cosine tables don't exist, so build them here. (Of
# course, if an __init__() method is added, move the call there.)
# If we're in automatic mode, check whether the new pattern has too few or
# too many vectors and skip it if so. Skip by setting up for
# a one tick wait to let us get back to the main loop so the user can
# update parameters or stop.
def restart(self):
if self.verbose:
print_('restart: int_state', self.int_state, 'cmd_state', self.cmd_state)
try:
tmp = self.sin_table[0]
except AttributeError:
self.make_tables(self.nvec)
new_state = self.INT_DRAW
self.takesvec = self.nvec / self.gcd(self.nvec, self.style)
if not int(self.takesvec) & 1 and int(self.petals) & 1:
self.takesvec /= 2
if self.cmd_state == self.CMD_GO:
if self.minvec > self.takesvec or self.maxvec < self.takesvec:
new_state = self.INT_SEARCH
self.AppSetTakesVec(self.takesvec)
self.AppClear()
self.nextpt = self.skipvec
self.endpt = min(self.takesvec, self.skipvec + self.drawvec)
old_state, self.int_state = self.int_state, new_state
if old_state == self.INT_IDLE: # Clock not running
self.clock()
elif old_state == self.INT_WAIT: # May be long delay, restart
self.AppCancelTimer()
self.clock()
else:
return 1 # If called by clock(), return and start clock
return 0 # We're in INT_IDLE or INT_WAIT, clock running
# Called from App. Recompute the center and scale values for the subsequent pattern.
# Force us into INT_RESIZE state if not already there so that in 100 ms we'll start
# to draw something to give an idea of the new size.
def resize(self, size, delay):
xsize, ysize = size
self.center = (xsize / 2, ysize / 2)
self.scale = min(xsize, ysize) / 2.1
self.repaint(delay)
# Called from App or above. From App, called with small delay because
# some window managers will produce a flood of expose events or call us
# before initialization is done.
def repaint(self, delay):
if self.int_state != self.INT_RESIZE:
# print_('repaint after', delay)
self.int_state = self.INT_RESIZE
self.AppCancelTimer()
self.AppAfter(delay, self.clock)
# Method that returns the next style and petal values for automatic
# mode and remembers them internally. Keep things scaled in the
# range [0:nvec) because there's little reason to exceed that.
def next(self):
self.style += self.sincr
self.petals += self.pincr
if self.style <= 0 or self.petals < 0:
self.style, self.petals = \
abs(self.petals) + 1, abs(self.style)
if self.style >= self.nvec:
self.style %= self.nvec # Don't bother defending against 0
if self.petals >= self.nvec:
self.petals %= self.nvec
self.AppSetParam(self.style, self.petals, self.nvec)
# Resume pattern drawing with the next one to display.
def resume(self):
self.next()
return self.restart()
# Go/Stop button.
def cmd_go_stop(self):
if self.cmd_state == self.CMD_STOP:
self.cmd_state = self.CMD_GO
self.resume() # Draw next pattern
elif self.cmd_state == self.CMD_GO:
self.cmd_state = self.CMD_STOP
self.update_labels()
# Centralize button naming to share with initialization.
# Leave colors to the application (assuming it cares), we can't guess
# what's available.
def update_labels(self):
if self.cmd_state == self.CMD_STOP:
self.AppCmdLabels(('Go', 'Redraw', 'Backward', 'Forward'))
else: # Must be in state CMD_GO
self.AppCmdLabels(('Stop', 'Redraw', 'Reverse', 'Skip'))
# Redraw/Redraw button
def cmd_redraw(self):
self.restart() # Redraw current pattern
# Backward/Reverse button
# Useful for when you see an interesting pattern and want
# to go back to it. If running, just change direction. If stopped, back
# up one step. The resume code handles the step, then we change the
# incrementers back to what they were. (Unless resume changed them too.)
def cmd_backward(self):
self.sincr = -self.sincr
self.pincr = -self.pincr
if self.cmd_state == self.CMD_STOP:
self.resume()
self.sincr = -self.sincr # Go forward again
self.pincr = -self.pincr
else:
self.AppSetIncrs(self.sincr, self.pincr)
# Forward/Skip button. CMD_STOP & CMD_GO both just call resume.
def cmd_step(self):
self.resume() # Draw next pattern
# Handler called on each timer event. This handles the metered drawing
# of a rose and the delays between them. It also registers for the next
# timer event unless we're idle (rose is done and the delay between
# roses is 0.)
def clock(self):
if self.int_state == self.INT_IDLE:
# print_('clock called in idle state')
delay = 0
elif self.int_state == self.INT_DRAW:
line, run = self.roselet()
self.AppCreateLine(self.rescale(line, self.center, self.scale))
if run:
delay = self.draw_delay
else:
if self.cmd_state == self.CMD_GO:
self.int_state = self.INT_WAIT
delay = self.wait_delay
else:
self.int_state = self.INT_IDLE
delay = 0
elif self.int_state == self.INT_SEARCH:
delay = self.resume() # May call us to start drawing
if self.int_state == self.INT_SEARCH:
delay = self.draw_delay # but not if searching.
elif self.int_state == self.INT_WAIT:
if self.cmd_state == self.CMD_GO:
delay = self.resume() # Calls us to start drawing
else:
self.int_state = self.INT_IDLE
delay = 0
elif self.int_state == self.INT_RESIZE: # Waiting for resize event stream to settle
self.AppSetParam(self.style, self.petals, self.nvec)
self.AppSetIncrs(self.sincr, self.pincr)
delay = self.restart() # Calls us to start drawing
if delay == 0:
if self.verbose:
print_('clock: going idle from state', self.int_state)
else:
self.AppAfter(delay, self.clock)
# Methods to allow App to change the parameters on the screen.
# These expect to be called when the associated parameter changes,
# but work reasonably well if several are called at once. (E.g.
# tkroses.py groups them into things that affect the visual display
# and warrant a new start, and things that just change and don't affect
# the ultimate pattern. All parameters within a group are updated
# at once even if the value hasn't changed.)
# We restrict the style and petals parameters to the range [0: nvec)
# since numbers outside of that range aren't interesting. We don't
# immediately update the value in the application, though we probably should.
# NW control window - key parameters
def SetStyle(self, value):
self.style = value % self.nvec
self.restart()
def SetSincr(self, value):
self.sincr = value
def SetPetals(self, value):
self.petals = value % self.nvec
self.restart()
def SetPincr(self, value):
self.pincr = value
# SW control window - vectors
def SetVectors(self, value):
self.nvec = value
self.style %= value
self.petals %= value
self.AppSetParam(self.style, self.petals, self.nvec)
self.make_tables(value)
self.restart()
def SetMinVec(self, value):
if self.maxvec >= value and self.nvec >= value:
self.minvec = value
def SetMaxVec(self, value):
if self.minvec < value:
self.maxvec = value
def SetSkipFirst(self, value):
self.skipvec = value
self.restart()
def SetDrawOnly(self, value):
self.drawvec = value
self.restart()
# SE control window - timings
def SetStep(self, value):
self.step = value
def SetDrawDelay(self, value):
self.draw_delay = value
def SetWaitDelay(self, value):
self.wait_delay = value
# Method for client to use to have us supply our defaults.
def SupplyControlValues(self):
self.update_labels()
self.AppSetParam(self.style, self.petals, self.nvec)
self.AppSetIncrs(self.sincr, self.pincr)
self.AppSetVectors(self.nvec, self.minvec, self.maxvec,
self.skipvec, self.drawvec)
self.AppSetTiming(self.step, self.draw_delay, self.wait_delay)
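# Minimal batch-mode sketch (not part of the original module): the "pure"
# rose() method above needs none of the App* callbacks, so a command-line
# client can generate a polyline directly. The parameter values here are
# illustrative only.
if __name__ == '__main__':
    demo = rose()
    line = demo.rose(style=100, petals=2, vectors=399)
    print_('generated', len(line), 'points; first three:', line[:3])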
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_clusters_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_access_profile_request, build_get_request, build_get_upgrade_profile_request, build_list_by_resource_group_request, build_list_cluster_admin_credentials_request, build_list_cluster_user_credentials_request, build_list_request, build_reset_aad_profile_request_initial, build_reset_service_principal_profile_request_initial, build_update_tags_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
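# Illustrative usage sketch, not generated code. It assumes an already
# authenticated ContainerServiceClient whose `managed_clusters` attribute is
# this operation group (the attribute name is an assumption here):
#
#     async for cluster in client.managed_clusters.list():
#         print(cluster.name)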
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedClusterUpgradeProfile":
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_access_profile(
self,
resource_group_name: str,
resource_name: str,
role_name: str,
**kwargs: Any
) -> "_models.ManagedClusterAccessProfile":
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_access_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
role_name=role_name,
template_url=self.get_access_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_admin_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_admin_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_admin_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
@distributed_trace_async
async def list_cluster_user_credentials(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.CredentialResults":
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_cluster_user_credentials_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_cluster_user_credentials.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
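    # ---------------------------------------------------------------------
    # Hedged sketch: pulling a kubeconfig out of the CredentialResults
    # returned above. The `kubeconfigs` list and its `value` bytes follow the
    # v2019_04_01 models; the client object is assumed to exist already.
    #
    #   creds = await client.managed_clusters.list_cluster_user_credentials(
    #       resource_group_name="my-rg", resource_name="my-aks")
    #   kubeconfig_bytes = creds.kubeconfigs[0].value
    #   with open("kubeconfig", "wb") as fh:
    #       fh.write(kubeconfig_bytes)
    # ---------------------------------------------------------------------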
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.ManagedCluster":
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedCluster')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedCluster",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Creates or updates a managed cluster.
Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
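    # ---------------------------------------------------------------------
    # Hedged sketch of driving the long-running operation above:
    # `managed_cluster` stands for a ManagedCluster model built by the caller,
    # and the client wiring is assumed rather than shown in this file.
    #
    #   poller = await client.managed_clusters.begin_create_or_update(
    #       resource_group_name="my-rg",
    #       resource_name="my-aks",
    #       parameters=managed_cluster,
    #   )
    #   cluster = await poller.result()  # waits (asynchronously) for the LRO to finish
    # ---------------------------------------------------------------------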
async def _update_tags_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ManagedCluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'TagsObject')
request = build_update_tags_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._update_tags_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.ManagedCluster"]:
"""Updates tags on a managed cluster.
Updates a managed cluster with the specified tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_04_01.models.ManagedCluster]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a managed cluster.
Deletes the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
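    # ---------------------------------------------------------------------
    # Hedged sketch: deleting a cluster through the poller returned above.
    # The deserialized result is None on success.
    #
    #   poller = await client.managed_clusters.begin_delete("my-rg", "my-aks")
    #   await poller.result()
    # ---------------------------------------------------------------------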
async def _reset_service_principal_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
request = build_reset_service_principal_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_service_principal_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
@distributed_trace_async
async def begin_reset_service_principal_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterServicePrincipalProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset Service Principal Profile of a managed cluster.
Update the service principal Profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
Managed Cluster.
:type parameters:
~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterServicePrincipalProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_service_principal_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
async def _reset_aad_profile_initial(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedClusterAADProfile')
request = build_reset_aad_profile_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
content_type=content_type,
json=_json,
template_url=self._reset_aad_profile_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
@distributed_trace_async
async def begin_reset_aad_profile(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.ManagedClusterAADProfile",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset AAD Profile of a managed cluster.
Update the AAD Profile for a managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
Cluster.
:type parameters: ~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterAADProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_aad_profile_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cdms2
import cdtime
import cmor
import sys
import os
import pdb
import getopt
import factory
import numpy
# os, pdb and numpy.arange are used below; import them explicitly rather than
# relying on the Toolbox wildcard imports to provide them.
from numpy import arange
from factory.formats import import_equation
from Toolbox.ESGFresources import *
from Toolbox.ESGFexcel import *
from Toolbox.CMORresources import CMORTable
# ************************************************************************
# process()
#
# Convert to obs4MIPS file format.
# ************************************************************************
def process( rc ):
'''
Convert netcdf/matlab/grads files into CMIP5 format.
'''
    # pdb.set_trace()   # debugging hook; uncomment to step through interactively
# ----------------------------
# Loop yearly on file list.
# ----------------------------
file_template = rc['file_template'].split(",");
if( len(file_template) == 2 ):
template_parameter = file_template[1]
rc['file_template'] = file_template[0]
else:
template_parameter = 'years'
for year in rc[template_parameter].split(","):
if(year == ''):
files= os.popen( "ls " + rc['file_template'] ).readlines()
else:
# ------------------------------------------------
        # Use string formatting for paths with the same argument
# ------------------------------------------------
try:
tmplFile = rc['file_template'] % (year)
except:
tmplFile = rc['file_template'].format(year)
if( not os.path.isfile( tmplFile) ) :
print "**** Warning %s not found\n" % ( tmplFile )
continue
files= os.popen( "ls " + tmplFile).readlines()
if( files == [] ):
print "No file found: Check your resource file"
return -1
# ------------------------------------------------
# Get the right handler to manage this file format
# ------------------------------------------------
Handler = factory.HandlerFormats(files[0].strip())
# -----------------------------------
# Take care of cmor initialization.
# -----------------------------------
cmor.setup(inpath=rc['inpath'],
netcdf_file_action = cmor.CMOR_REPLACE)
cmor.dataset(experiment_id = rc['experiment_id'],
institution = rc['institution' ],
calendar = rc['calendar' ],
institute_id = rc['institute_id' ],
model_id = rc['model_id' ],
source = rc['source' ],
contact = rc['contact' ],
references = rc['references' ])
# -----------------------------------------
# add extra Global Attributes for obs4MIPs.
# -----------------------------------------
cmor.set_cur_dataset_attribute( 'instrument', rc['instrument' ])
cmor.set_cur_dataset_attribute( 'mip_specs', rc['mip_specs' ])
cmor.set_cur_dataset_attribute( 'data_structure', rc['data_structure'])
cmor.set_cur_dataset_attribute( 'source_type', rc['source_type' ])
cmor.set_cur_dataset_attribute( 'source_id', rc['source_id' ])
cmor.set_cur_dataset_attribute( 'realm', rc['realm' ])
cmor.set_cur_dataset_attribute( 'obs_project', rc['obs_project' ])
cmor.set_cur_dataset_attribute( 'processing_version',
rc['processing_version'] )
cmor.set_cur_dataset_attribute( 'processing_level',
rc['processing_level'] )
cmor.load_table(rc['table'])
# ---------------------------------------------------------------------
    # We loop on each file found; a new cmor file will be created on each
    # iteration. If you want to aggregate, you need to use a GrADS ctl file
    # or a NetCDF list of files.
# ---------------------------------------------------------------------
for file in files:
fnm=file.strip() # Get rid of \n
aVariable = eval(rc['original_var'])
nbVariable = len(aVariable)
# -----------------------------------------------------
        # ECMWF needs synoptic time 00z and 12z in the filename.
# We extract it from the first file in the list.
# -----------------------------------------------------
if( rc['source_fn'] == 'SYNOPTIC' ):
index = fnm.find('z.')
rc['SYNOPTIC'] = fnm[index-2:index]
# -----------------------
# Work on all variables
# -------------------------
for j in arange(nbVariable):
# ----------------------------------------------------
            # Fetch the variable directly or execute the equation.
# ----------------------------------------------------
try:
variable=aVariable[j]
Handler.open(fnm, variable=variable)
rc['cvrt_original_var'] = aVariable[j]
print "Working on variable %s " % variable
except:
if( aVariable[j] != 'equation' ) :
print "Variable %s can't open" % variable
continue
else:
print "Executing %s " % eval(rc['equation'])[j]
# pdb.set_trace()
rc['cvrt_original_units'] = eval(rc['original_units'])[j]
rc['cvrt_cmor_var'] = eval(rc['cmor_var'])[j]
rc['cvrt_equation'] = eval(rc['equation'])[j]
rc['cvrt_level'] = eval(rc['level'])[j]
data=Handler.getData()
# ----------------------------------------------------------
# Evaluate equation if needed. Usually used to change units
# ----------------------------------------------------------
if( rc['cvrt_equation'][0] == '@' ):
fncName = rc['cvrt_equation'][1:]
fnc = import_equation( "equations.%s" % fncName )
data[:]= fnc(Handler)
else:
data[:]=eval(rc['cvrt_equation'])
# -------------------------------------------------------------
# Save filled value in case data type is changed in createAxes
# -------------------------------------------------------------
fill_value = data.fill_value
# ---------------------------------------------
# Extract latitude/longitude
# ---------------------------------------------
lonvals=Handler.getLongitude()
latvals=Handler.getLatitude()
# ---------------------
# Create cmor time axis
# ----------------------
(rel_time, rel_time_bnds) = createTime(Handler, rc)
# ---------------------------------------------------
# Create cmor axes and add an axis to data if needed
# ---------------------------------------------------
(axes, data) = createAxes( rc, latvals, lonvals, data )
axis_ids = list()
for axis in axes:
axis_id = cmor.axis(**axis)
axis_ids.append(axis_id)
# ----------------------------------------------------------
# Create cmor variable
# Note: since this is in the loop, a new cmor file will be
            # created for each cmor write command.
# ----------------------------------------------------------
varid = cmor.variable(table_entry = rc['cvrt_cmor_var'],
axis_ids = axis_ids,
history = '',
missing_value = fill_value,
original_name = rc['cvrt_original_var'],
units = rc['cvrt_original_units']
)
# -------------------------------
# Write data for this time frame.
# -------------------------------
cmor.write(varid,data,\
time_vals=rel_time,time_bnds=rel_time_bnds)
cmor.close(varid,file_name=True)
# ---------------------------------------
# Rename cmor files according to project.
# ---------------------------------------
if( movefiles(rc) ):
return -2
cmor.close()
return 0
# ********************************************************************
#
# createTime()
#
# Define Time and Time bound axes for cmor
#
# ********************************************************************
def createTime(Handler, rc):
'''
    InputTimeUnits: taken from the resource file or from the first file
    in a list of files.
    Return the relative time and time bounds using OutputTimeUnits from
    the resource file.
'''
# ----------------------------------------------------
# Retrieve time units from file if not provided in the
# resource file.
# ----------------------------------------------------
InputTimeUnits = Handler.getTimeUnits(rc['InputTimeUnits'])
# --------------------------------------------------------
    # Create time relative to January 1st 1900 to facilitate
    # THREDDS file handling.
# -------------------------------------------------------
cur_time = Handler.getTime(InputTimeUnits)
rel_time =[cur_time[i].torel(rc['OutputTimeUnits']).value
for i in range(len(cur_time))]
    if( len(rel_time) == 1 ):
        deltarel = 1
    elif( len(rel_time) == 2 ):
        deltarel = rel_time[1] - rel_time[0]
    else:
        deltarel = rel_time[2] - rel_time[1]
rel_time_bnds = rel_time[:]
rel_time_bnds.append(rel_time[-1]+deltarel)
return rel_time, rel_time_bnds
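# Illustration of the bounds construction above, with made-up numbers and no
# cdms2 objects involved:
#   rel_time      = [0.0, 1.0, 2.0]            # days since the output reference date
#   deltarel      = rel_time[2] - rel_time[1]  # 1.0
#   rel_time_bnds = [0.0, 1.0, 2.0, 3.0]       # rel_time plus one trailing bound
# cmor.write() later pairs consecutive bounds with each time value.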
# ********************************************************************
#
# getCMIP5lev()
#
# Extract CMIP5 mandatory level and recreate a new data array.
# There are 16 mandatory levels.
#
# ********************************************************************
def getCMIP5lev(data,rc):
'''
'''
oTable = CMORTable(rc['inpath'], rc['table'], "plevs")
# ----------------------
    # Extract specified levels
# ----------------------
if( 'levels' in oTable.dico.keys() ):
#pdb.set_trace()
        dataLevels = data.getLevel()[:]
        # Levels already in Pascal need no conversion; default the scale factor
        # so it is always defined before the branches below use it.
        LevelScaleFactor = 1
        if( data.getLevel().units == "millibars" or
            data.getLevel().units == "hPa" or
            data.getLevel().units == "mbar" ):
            # --------------------------
            # Change units to Pascal
            # ---------------------------
            LevelScaleFactor = 100
            dataLevels = data.getLevel()[:] * LevelScaleFactor
# ----------------------------------------
        # No level selected, return the whole data array
# ----------------------------------------
if( len(rc['cvrt_level'].split(":")) == 1 ):
levels = [ float(item) for item in dataLevels ]
lev=cdms2.createAxis( levels )
lev.designateLevel()
lev.units="pa"
lev.long_name=data.getLevel().long_name
#lev.id="lev"
#lev=data.getAxis(1)
#lev.__setattr__('_data_',dataLevels.astype(float))
#lev.__setattr__('units',"Pa")
#lev.units="hPa"
data2=data.pressureRegrid(lev)
return data2
if( rc['cvrt_level'].split(':')[1] == "CMIP5" ):
lev=cdms2.createAxis( [ float(item/LevelScaleFactor)
for item in dataLevels
if item in oTable.dico['levels' ] ] )
lev.designateLevel()
lev.units="pa"
lev.long_name = data.getLevel().long_name
data2=data.pressureRegrid(lev)
lev[:]=lev[:]*LevelScaleFactor
return data2
else:
# -----------------------
# Assume a list of values
# -----------------------
levels = rc['cvrt_level'].split(':')[1].split(",")
# --------------------------
# Change units to Pascal
# ---------------------------
dataLevels = [ float(rc['cvrt_level'].split(":")[1].split(",")[i]) * \
LevelScaleFactor for i in range(len(levels)) ]
# -----------------------------------
# Match dataLevels with CMIP5 levels
# Use file units
# -----------------------------------
lev=cdms2.createAxis( [ float(item/LevelScaleFactor)
for item in dataLevels
if item in oTable.dico['levels' ] ] )
# -----------------------------------
# Set axis metadata
# -----------------------------------
lev.units="pa"
lev.long_name = data.getLevel().long_name
lev.designateLevel()
# -----------------------------------
# Extract specified levels
# -----------------------------------
data2=data.pressureRegrid(lev)
# -----------------------------------
# Scale data back
# -----------------------------------
lev[:]=lev[:]*LevelScaleFactor
return data2
return data
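# Hedged sketch of the unit handling above, using plain numpy; the CMIP5 level
# set here is illustrative, not the full 16-level table:
#   import numpy
#   file_levels_hpa = numpy.array([1000., 850., 500., 123.])   # hypothetical input in hPa
#   cmip5_pa        = set([100000., 85000., 50000.])           # subset of oTable.dico['levels']
#   in_pa           = file_levels_hpa * 100                    # LevelScaleFactor
#   selected        = [float(p) for p in in_pa if p in cmip5_pa]
#   # -> [100000.0, 85000.0, 50000.0]; 12300.0 is dropped as non-mandatory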
# ********************************************************************
#
# createAxes()
#
# Define axes required by cmor and add z axis to data if needed
#
# ********************************************************************
def createAxes(rc, latvals, lonvals, data):
# ---------------------------------------------
# Create time/lat/lon axes using a dictionary
# ---------------------------------------------
axes = [
{'table_entry' : 'time',
'units' : rc['OutputTimeUnits']},
{'table_entry' : 'latitude',
'units' : 'degrees_north',
'coord_vals' : latvals,
'cell_bounds' : latvals.getBounds()},
{'table_entry' : 'longitude',
'units' : 'degrees_east',
'coord_vals' : lonvals,
'cell_bounds' : lonvals.getBounds()},
]
fill_value = data.fill_value
if( rc['cvrt_level'] == 'height2m' ):
axes.append({'table_entry' : 'height2m',
'units' : 'm',
'coord_vals' : [2.0] })
data = numpy.array(data[:])
data = data[:,:,:,numpy.newaxis]
elif( rc['cvrt_level'] != '' ):
data = getCMIP5lev( data, rc )
levels=data.getLevel()[:]
axes = numpy.insert(axes, 1,
{'table_entry' : 'plevs',
'units' : 'Pa',
'coord_vals' : levels })
return axes, data
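# Shape bookkeeping for the 'height2m' branch above: a (time, lat, lon) array
# gains a trailing singleton axis so it lines up with the four axes returned, e.g.
#   data.shape == (12, 90, 180)
#   data = data[:, :, :, numpy.newaxis]   # -> (12, 90, 180, 1)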
# ********************************************************************
#
# usage()
#
# ********************************************************************
def usage(message):
'''
Describe program synopsis.
'''
print
print "*************************"
print message
print "*************************"
print
print
print "obs4MIPS_process.py [-h] -r resource"
print " resource: File containing Global attributes"
print ""
print "obs4MIPS will convert an input data file into CMIP5 format using "
print "CMOR. A directory path will be creating using CMOR by default or "
print "using a template provided in the resource file."
print
# ********************************************************************
#
# main()
#
# ********************************************************************
def main():
'''
'''
    # pdb.set_trace()   # debugging hook; uncomment to step through interactively
try:
opts, args = getopt.getopt(sys.argv[1:], "hy:r:x:",
["help" ,"year=","resource=","excel="])
except getopt.GetoptError, err:
usage(str(err))# will print something like "option -a not recognized"
return(2)
# --------------------------
# Verify passed arguments
# --------------------------
year = -1
resource = None
excel = None
for o, a in opts:
if o in ("-r", "--resource"):
resource = a
elif o in ("-x", "--excel"):
excel = a
elif o in ("-h", "--help"):
            usage("")
return(0)
elif o in ("-y", "--year"):
            year = a
else:
assert False, "unhandled option"
# ------------------------------
# Does the resource file exist?
# ------------------------------
if( ((resource == None ) or ( not os.path.isfile( resource ) )) and (( excel == None ) or ( not os.path.isfile( excel ) )) ):
usage("bad Input Resource/Excel File")
return 1
# -----------------------
# Read in "rc" file
# -----------------------
if( resource ):
rc = ESGFresources( resource )
if( excel ):
rc = ESGFexcel( excel )
# --------------------------------
# Extract CMIP5 Table information
# --------------------------------
oTable = CMORTable(rc['inpath'], rc['table'])
if( not 'original_var' in rc.resources.keys() ):
sys.exit(-1)
rc['project_id'] = oTable[ 'project_id' ]
rc['product'] = oTable[ 'product' ]
rc['modeling_realm'] = oTable[ 'modeling_realm' ]
rc['frequency'] = oTable[ 'frequency' ]
if( process(rc) ):
return -1
return 0
# ********************************************************************
#
# Call main program and return exit code
#
# ********************************************************************
if __name__ == '__main__':
sys.exit(main())
|
|
""" core implementation of testing process: init, session, runtest loop. """
from __future__ import absolute_import, division, print_function
import contextlib
import functools
import os
import pkgutil
import six
import sys
import _pytest
from _pytest import nodes
import _pytest._code
import py
from _pytest.config import directory_arg, UsageError, hookimpl
from _pytest.outcomes import exit
from _pytest.runner import collect_one_node
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
parser.addini(
"norecursedirs",
"directory patterns to avoid for recursion",
type="args",
default=[".*", "build", "dist", "CVS", "_darcs", "{arch}", "*.egg", "venv"],
)
parser.addini(
"testpaths",
"directories to search for tests when no files or directories are given in the "
"command line.",
type="args",
default=[],
)
# parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
# )
group = parser.getgroup("general", "running and selection options")
group._addoption(
"-x",
"--exitfirst",
action="store_const",
dest="maxfail",
const=1,
help="exit instantly on first error or failed test.",
    )
group._addoption(
"--maxfail",
metavar="num",
action="store",
type=int,
dest="maxfail",
default=0,
help="exit after first num failures or errors.",
)
group._addoption(
"--strict",
action="store_true",
help="marks not registered in configuration file raise errors.",
)
group._addoption(
"-c",
metavar="file",
type=str,
dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit "
"configuration files.",
)
group._addoption(
"--continue-on-collection-errors",
action="store_true",
default=False,
dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur.",
)
group._addoption(
"--rootdir",
action="store",
dest="rootdir",
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
"'$HOME/root_dir'.",
)
group = parser.getgroup("collect", "collection")
group.addoption(
"--collectonly",
"--collect-only",
action="store_true",
help="only collect tests, don't execute them.",
    )
group.addoption(
"--pyargs",
action="store_true",
help="try to interpret all arguments as python packages.",
)
group.addoption(
"--ignore",
action="append",
metavar="path",
help="ignore path during collection (multi-allowed).",
)
group.addoption(
"--deselect",
action="append",
metavar="nodeid_prefix",
help="deselect item during collection (multi-allowed).",
)
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption(
"--confcutdir",
dest="confcutdir",
default=None,
metavar="dir",
type=functools.partial(directory_arg, optname="--confcutdir"),
help="only load conftest.py's relative to specified dir.",
)
group.addoption(
"--noconftest",
action="store_true",
dest="noconftest",
default=False,
help="Don't load any conftest.py files.",
)
group.addoption(
"--keepduplicates",
"--keep-duplicates",
action="store_true",
dest="keepduplicates",
default=False,
help="Keep duplicate tests.",
)
group.addoption(
"--collect-in-virtualenv",
action="store_true",
dest="collect_in_virtualenv",
default=False,
help="Don't ignore tests in a local virtualenv directory",
)
group = parser.getgroup("debugconfig", "test session debugging and configuration")
group.addoption(
"--basetemp",
dest="basetemp",
default=None,
metavar="dir",
help=(
"base temporary directory for this test run."
"(warning: this directory is removed if it exists)"
),
)
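# Example configuration for the ini options registered above; a sketch for a
# pytest.ini at the rootdir (directory names are hypothetical):
#
#   [pytest]
#   testpaths = tests integration
#   norecursedirs = .* build dist CVS _darcs {arch} *.egg venv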
def pytest_configure(config):
__import__("pytest").config = config # compatibility
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except UsageError:
raise
except Failed:
session.exitstatus = EXIT_TESTSFAILED
except KeyboardInterrupt:
excinfo = _pytest._code.ExceptionInfo()
exitstatus = EXIT_INTERRUPTED
if initstate <= 2 and isinstance(excinfo.value, exit.Exception):
sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg))
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except: # noqa
excinfo = _pytest._code.ExceptionInfo()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session, exitstatus=session.exitstatus
)
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted("%d errors during collection" % session.testsfailed)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def _in_venv(path):
"""Attempts to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script"""
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
if not bindir.isdir():
return False
activates = (
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
)
return any([fname.basename in activates for fname in bindir.listdir()])
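# Rough illustration of the check above with py.path; the path is hypothetical:
#   import sys
#   import py
#   venv = py.path.local("/tmp/venv")
#   bindir = venv.join("Scripts" if sys.platform.startswith("win") else "bin")
#   looks_like_venv = bindir.isdir() and any(
#       f.basename == "activate" for f in bindir.listdir())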
def pytest_ignore_collect(path, config):
ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath())
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
if py.path.local(path) in ignore_paths:
return True
allow_in_venv = config.getoption("collect_in_virtualenv")
if _in_venv(path) and not allow_in_venv:
return True
# Skip duplicate paths.
keepduplicates = config.getoption("keepduplicates")
duplicate_paths = config.pluginmanager._duplicatepaths
if not keepduplicates:
if path in duplicate_paths:
return True
else:
duplicate_paths.add(path)
return False
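# The hook above honours both the command line flag and conftest.py, e.g.
# (paths are hypothetical):
#   pytest --ignore=legacy --ignore=tools/slow_helper.py
# or, in a conftest.py next to the files to skip:
#   collect_ignore = ["setup.py", "legacy"]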
def pytest_collection_modifyitems(items, config):
deselect_prefixes = tuple(config.getoption("deselect") or [])
if not deselect_prefixes:
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
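# Deselection example matching the prefix logic above; the node id is hypothetical:
#   pytest --deselect tests/test_api.py::TestUsers::test_create
# Every collected item whose nodeid starts with that prefix is reported via
# pytest_deselected and removed from the run.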
@contextlib.contextmanager
def _patched_find_module():
"""Patch bug in pkgutil.ImpImporter.find_module
When using pkgutil.find_loader on python<3.4 it removes symlinks
from the path due to a call to os.path.realpath. This is not consistent
with actually doing the import (in these versions, pkgutil and __import__
did not share the same underlying code). This can break conftest
discovery for pytest where symlinks are involved.
    The only Python older than 3.4 supported by pytest is Python 2.7.
"""
if six.PY2: # python 3.4+ uses importlib instead
def find_module_patched(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
# original: path = [os.path.realpath(self.path)]
path = [self.path]
try:
file, filename, etc = pkgutil.imp.find_module(subname, path)
except ImportError:
return None
return pkgutil.ImpLoader(fullname, file, filename, etc)
old_find_module = pkgutil.ImpImporter.find_module
pkgutil.ImpImporter.find_module = find_module_patched
try:
yield
finally:
pkgutil.ImpImporter.find_module = old_find_module
else:
yield
class FSHookProxy(object):
def __init__(self, fspath, pm, remove_mods):
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = "builtins" # for py3
class Failed(Exception):
""" signals a stop as failed test run. """
class Session(nodes.FSCollector):
Interrupted = Interrupted
Failed = Failed
def __init__(self, config):
nodes.FSCollector.__init__(
self, config.rootdir, parent=None, config=config, session=self, nodeid=""
)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False
self.shouldfail = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self._initialpaths = frozenset()
# Keep track of any collected nodes in here, so we don't duplicate fixtures
self._node_cache = {}
self.config.pluginmanager.register(self, name="session")
@hookimpl(tryfirst=True)
def pytest_collectstart(self):
if self.shouldfail:
raise self.Failed(self.shouldfail)
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, "wasxfail"):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
# check if we have the common case of running
# hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
            # all plugins are active for this fspath
proxy = self.config.hook
return proxy
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
self.config.pluginmanager.check_pending()
hook.pytest_collection_modifyitems(
session=self, config=self.config, items=items
)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
initialpaths = []
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
initialpaths.append(parts[0])
self._initialpaths = frozenset(initialpaths)
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
# XXX: test this
raise UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for parts in self._initialparts:
arg = "::".join(map(str, parts))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
from _pytest.python import Package
names = self._parsearg(arg)
argpath = names.pop(0).realpath()
paths = []
root = self
# Start with a Session root, and delve to argpath item (dir or file)
# and stack all Packages found on the way.
# No point in finding packages when collecting doctests
if not self.config.option.doctestmodules:
for parent in argpath.parts():
pm = self.config.pluginmanager
if pm._confcutdir and pm._confcutdir.relto(parent):
continue
if parent.isdir():
pkginit = parent.join("__init__.py")
if pkginit.isfile():
if pkginit in self._node_cache:
root = self._node_cache[pkginit][0]
else:
col = root._collectfile(pkginit)
if col:
if isinstance(col[0], Package):
root = col[0]
# always store a list in the cache, matchnodes expects it
self._node_cache[root.fspath] = [root]
# If it's a directory argument, recurse and look for any Subpackages.
# Let the Package collector deal with subnodes, don't collect here.
if argpath.check(dir=1):
assert not names, "invalid arg %r" % (arg,)
for path in argpath.visit(
fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True
):
pkginit = path.dirpath().join("__init__.py")
if pkginit.exists() and not any(x in pkginit.parts() for x in paths):
for x in root._collectfile(pkginit):
yield x
paths.append(x.fspath.dirpath())
if not any(x in path.parts() for x in paths):
for x in root._collectfile(path):
if (type(x), x.fspath) in self._node_cache:
yield self._node_cache[(type(x), x.fspath)]
else:
self._node_cache[(type(x), x.fspath)] = x
yield x
else:
assert argpath.check(file=1)
if argpath in self._node_cache:
col = self._node_cache[argpath]
else:
col = root._collectfile(argpath)
if col:
self._node_cache[argpath] = col
for y in self.matchnodes(col, names):
yield y
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
for pat in self._norecursepatterns:
if path.check(fnmatch=pat):
return False
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _tryconvertpyarg(self, x):
"""Convert a dotted module name to path."""
try:
with _patched_find_module():
loader = pkgutil.find_loader(x)
except ImportError:
return x
if loader is None:
return x
# This method is sometimes invoked when AssertionRewritingHook, which
# does not define a get_filename method, is already in place:
try:
with _patched_find_module():
path = loader.get_filename(x)
except AttributeError:
# Retrieve path from AssertionRewritingHook:
path = loader.modules[x][0].co_filename
if loader.is_package(x):
path = os.path.dirname(path)
return path
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
parts = str(arg).split("::")
if self.config.option.pyargs:
parts[0] = self._tryconvertpyarg(parts[0])
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
raise UsageError(
"file or package not found: " + arg + " (missing __init__.py?)"
)
raise UsageError("file not found: " + arg)
parts[0] = path
return parts
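    # Illustrative example (comment added, not from the original source): given the
    # command-line argument "tests/test_foo.py::TestBar::test_baz", _parsearg returns
    # [local('<invocation_dir>/tests/test_foo.py'), 'TestBar', 'test_baz'], i.e. the
    # resolved filesystem path followed by the remaining "::"-separated names.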
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, nodes.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, nodes.Collector)
key = (type(node), node.nodeid)
if key in self._node_cache:
rep = self._node_cache[key]
else:
rep = collect_one_node(node)
self._node_cache[key] = rep
if rep.passed:
has_matched = False
for x in rep.result:
# TODO: remove parametrized workaround once collection structure contains parametrization
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
else:
# report collection failures here to avoid failing to run some test
# specified in the command line because the module could not be
# imported (#134)
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, nodes.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, nodes.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
|
|
import search
from math import (cos, pi)
import numpy
# A sample map problem
sumner_map = search.UndirectedGraph(dict(
Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
Cottontown=dict(Portland=18),
Fairfield=dict(Mitchellville=21, Portland=17),
Mitchellville=dict(Portland=7, Fairfield=21),
))
'''
sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
sumner_puzzle.label = 'Sumner'
sumner_puzzle.description =
An abbreviated map of Sumner County, TN.
This map is unique, to the best of my knowledge.
'''
romania_map = search.UndirectedGraph(dict(
A=dict(Z=75,S=140,T=118),
Z=dict(O=71,A=75),
S=dict(O=151,R=80,F=99),
T=dict(A=118,L=111),
O=dict(Z=71,S=151),
L=dict(T=111,M=70),
M=dict(L=70,D=75),
D=dict(M=75,C=120),
R=dict(S=80,C=146,P=97),
C=dict(R=146,P=138,D=120),
F=dict(S=99,B=211),
P=dict(R=97,C=138,B=101),
B=dict(G=90,P=101,F=211),
))
'''
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description =
The simplified map of Romania, per
Russell & Norvig, 3rd Ed., p. 68.
'''
nomich_map = search.UndirectedGraph(dict(
    TraverseCity=dict(Interlochen=20, Leland=38, GlenArbor=37, OldMission=27, Copemish=43, ElkRapids=30),
    Interlochen=dict(TraverseCity=23, Copemish=20, Leland=42, BearLake=62),
    Leland=dict(TraverseCity=38, GlenArbor=24, Copemish=62, OldMission=61),
    GlenArbor=dict(TraverseCity=37, ElkRapids=76),
    OldMission=dict(TraverseCity=27),
    Copemish=dict(TraverseCity=43, Interlochen=23, BearLake=21),
    ElkRapids=dict(TraverseCity=30, GlenArbor=76)
))
nomich_map.location = dict(
    TraverseCity=(0, 0),
    OldMission=(0, 35),
    ElkRapids=(45, 55),
    GlenArbor=(-65, 35),
    Leland=(-45, 45),
    Copemish=(-50, 55),
    Interlochen=(-40, -30),
    BearLake=(-60, -60)
)
nomich_puzzle1= search.GraphProblem('TraverseCity', 'Copemish', nomich_map)
nomich_puzzle1.label='A puzzle where uniform-cost works best'
nomich_puzzle1.description='''
A puzzle where uniform-cost works best.
'''
nomich_puzzle2= search.GraphProblem('Interlochen', 'ElkRapids', nomich_map)
nomich_puzzle2.label='Breadth-First is better than Depth-First'
nomich_puzzle2.description='''
A puzzle where Breadth-First is better than Depth-First
'''
nomich_puzzle3 = search.GraphProblem('Leland', 'ElkRapids', nomich_map)
nomich_puzzle3.label = 'Breadth-First is better than Best-First'
nomich_puzzle3.description = '''
A puzzle where Breadth-First search is better than Best-First search.
'''
nomich_puzzle4 = search.GraphProblem('Interlochen', 'Leland', nomich_map)
nomich_puzzle4.label = 'A* expands fewer nodes at the same cost as uniform-cost'
nomich_puzzle4.description = '''
A puzzle where A* expands fewer nodes than uniform-cost search while finding a path of the same cost.
'''
# A trivial Problem definition
class LightSwitch(search.Problem):
def actions(self, state):
return ['up', 'down']
def result(self, state, action):
if action == 'up':
return 'on'
else:
return 'off'
def goal_test(self, state):
return state == 'on'
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
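# Illustrative usage sketch (added comment; assumes an AIMA-style search module that
# provides astar_search -- adjust to whatever this course's search.py actually exposes):
#
#   goal_node = search.astar_search(LightSwitch('off'))
#   print(goal_node.solution())   # expected: ['up']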
def String2State(myString):
a0 = "A1,A2,A3,B1,B2,B3,C1,C2,C3".split(",")
a1 = myString.split(",")
state = [[],[],[],
[],[],[],
[],[],[]]
for i in range(len(state)):
state[i] = [a0[i], int(a1[i])]
return state
def State2String(state):
string =""
for i in range(len(state)):
if i < len(state)-1:
string = string + str(state[i][1])+","
else:
string = string + str(state[i][1])
return string.strip()
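# Worked example (added comment): String2State("1,2,3,4,5,0,7,8,6") returns
# [['A1', 1], ['A2', 2], ['A3', 3], ['B1', 4], ['B2', 5], ['B3', 0],
#  ['C1', 7], ['C2', 8], ['C3', 6]], and State2String of that list gives back
# "1,2,3,4,5,0,7,8,6", so the two functions are inverses of each other.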
def getBest(state,goal):
string_state=state.state.split(",")
goals = goal.split(",")
goal_state=[]
new_state=[]
for j in range(len(string_state)):
new_state.append(int(string_state[j]))
goal_state.append(int(goals[j]))
#print(goal_state)
#print(new_state)
best=0
for i in range(len(new_state)):
#print(new_state)
best = (abs(i-goal_state.index(new_state[i]))) + best
return best
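# Worked example (added comment): with node state "1,2,3,4,5,0,7,8,6" and goal
# "1,2,3,4,5,6,7,8,0", only the blank (0) and tile 6 are displaced; the blank sits at
# index 5 but belongs at index 8 (|5-8| = 3) and tile 6 sits at index 8 but belongs at
# index 5 (|8-5| = 3), so getBest returns 6. Note this is a linear-index displacement
# heuristic over the flattened board, not a true Manhattan distance on the 3x3 grid.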
class SlidingPuzzle(search.Problem):
def __init__(self,initial,goal):
self.initial = initial
self.goal = goal
def actions(self,state):
state1 = String2State(state)[:]
#print(state1)
x = "blank"
for i in range(len(state1)):
#print(state1[i][1])
if state1[i][1] == 0:
x = state1[i][0]
break
#print(x)
#print('space:'+ str(x))
        if x == 'A1':
            return [1, 3]
        elif x == 'A2':
            return [0, 2, 4]
        elif x == 'A3':
            return [1, 5]
        elif x == 'B1':
            return [0, 4, 6]
        elif x == 'B2':
            return [3, 1, 5, 7]
        elif x == 'B3':
            #print([4,2,8])
            return [4, 2, 8]
        elif x == 'C1':
            return [7, 3]
        elif x == 'C2':
            return [8, 4, 6]
        elif x == 'C3':
            return [7, 5]
def result(self,state, action):
state1 = String2State(state)[:]
#print('resultIn: '+str(state1))
for i in range(len(state1)):
#print(state1[i][1])
if state1[i][1] == 0:
x = i
break
#print(x)
#print(action)
state1[x][1] = state1[action][1]
state1[action][1] = 0
#print('new state: '+str(state1))
#print(State2String(state1))
return State2String(state1)
def goal_test(self,state):
#print('state: ' + state)
#print('goal: '+ self.goal)
return state == self.goal
def h(self,state):
return getBest(state,self.goal)
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
start_puzzle1 = "1,2,3,4,5,0,7,8,6"
SlidingPuzzle_Goal = "1,2,3,4,5,6,7,8,0"
start_puzzle2 = "1,2,3,4,0,6,5,7,8"
slide_puzzle1 = SlidingPuzzle(str(start_puzzle1), str(SlidingPuzzle_Goal))
slide_puzzle1.label ='SlidepuzzleTest'
slide_puzzle2= SlidingPuzzle(start_puzzle2,SlidingPuzzle_Goal)
slide_puzzle2.label= "Slidepuzzle2"
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
mySearches = [
# swiss_puzzle,
# sumner_puzzle,
# romania_puzzle,
#switch_puzzle,
nomich_puzzle1,
nomich_puzzle2,
nomich_puzzle3,
nomich_puzzle4,
slide_puzzle1,
#slide_puzzle2
]
mySearchMethods = []
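# Illustrative driver sketch (hypothetical; the actual course harness that consumes
# mySearches/mySearchMethods may differ). It assumes the AIMA-style search module
# exposes breadth_first_tree_search, uniform_cost_search and astar_search:
#
#   if __name__ == '__main__':
#       for method in (search.breadth_first_tree_search,
#                      search.uniform_cost_search,
#                      search.astar_search):
#           for puzzle in mySearches:
#               goal = method(puzzle)
#               print(puzzle.label, method.__name__,
#                     goal.solution() if goal else None)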
|
|
"""Support for Toon sensors."""
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.const import ENERGY_KILO_WATT_HOUR, POWER_WATT
from . import (
ToonData,
ToonEntity,
ToonElectricityMeterDeviceEntity,
ToonGasMeterDeviceEntity,
ToonSolarDeviceEntity,
ToonBoilerDeviceEntity,
)
from .const import CURRENCY_EUR, DATA_TOON, DOMAIN, VOLUME_CM3, VOLUME_M3, RATIO_PERCENT
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up Toon sensors based on a config entry."""
toon = hass.data[DATA_TOON][entry.entry_id]
sensors = [
ToonElectricityMeterDeviceSensor(
toon, "power", "value", "Current Power Usage", "mdi:power-plug", POWER_WATT
),
ToonElectricityMeterDeviceSensor(
toon,
"power",
"average",
"Average Power Usage",
"mdi:power-plug",
POWER_WATT,
),
ToonElectricityMeterDeviceSensor(
toon,
"power",
"daily_value",
"Power Usage Today",
"mdi:power-plug",
ENERGY_KILO_WATT_HOUR,
),
ToonElectricityMeterDeviceSensor(
toon,
"power",
"daily_cost",
"Power Cost Today",
"mdi:power-plug",
CURRENCY_EUR,
),
ToonElectricityMeterDeviceSensor(
toon,
"power",
"average_daily",
"Average Daily Power Usage",
"mdi:power-plug",
ENERGY_KILO_WATT_HOUR,
),
ToonElectricityMeterDeviceSensor(
toon,
"power",
"meter_reading",
"Power Meter Feed IN Tariff 1",
"mdi:power-plug",
ENERGY_KILO_WATT_HOUR,
),
ToonElectricityMeterDeviceSensor(
toon,
"power",
"meter_reading_low",
"Power Meter Feed IN Tariff 2",
"mdi:power-plug",
ENERGY_KILO_WATT_HOUR,
),
]
if toon.gas:
sensors.extend(
[
ToonGasMeterDeviceSensor(
toon,
"gas",
"value",
"Current Gas Usage",
"mdi:gas-cylinder",
VOLUME_CM3,
),
ToonGasMeterDeviceSensor(
toon,
"gas",
"average",
"Average Gas Usage",
"mdi:gas-cylinder",
VOLUME_CM3,
),
ToonGasMeterDeviceSensor(
toon,
"gas",
"daily_usage",
"Gas Usage Today",
"mdi:gas-cylinder",
VOLUME_M3,
),
ToonGasMeterDeviceSensor(
toon,
"gas",
"average_daily",
"Average Daily Gas Usage",
"mdi:gas-cylinder",
VOLUME_M3,
),
ToonGasMeterDeviceSensor(
toon,
"gas",
"meter_reading",
"Gas Meter",
"mdi:gas-cylinder",
VOLUME_M3,
),
ToonGasMeterDeviceSensor(
toon,
"gas",
"daily_cost",
"Gas Cost Today",
"mdi:gas-cylinder",
CURRENCY_EUR,
),
]
)
if toon.solar:
sensors.extend(
[
ToonSolarDeviceSensor(
toon,
"solar",
"value",
"Current Solar Production",
"mdi:solar-power",
POWER_WATT,
),
ToonSolarDeviceSensor(
toon,
"solar",
"maximum",
"Max Solar Production",
"mdi:solar-power",
POWER_WATT,
),
ToonSolarDeviceSensor(
toon,
"solar",
"produced",
"Solar Production to Grid",
"mdi:solar-power",
POWER_WATT,
),
ToonSolarDeviceSensor(
toon,
"solar",
"average_produced",
"Average Solar Production to Grid",
"mdi:solar-power",
POWER_WATT,
),
ToonElectricityMeterDeviceSensor(
toon,
"solar",
"meter_reading_produced",
"Power Meter Feed OUT Tariff 1",
"mdi:solar-power",
ENERGY_KILO_WATT_HOUR,
),
ToonElectricityMeterDeviceSensor(
toon,
"solar",
"meter_reading_low_produced",
"Power Meter Feed OUT Tariff 2",
"mdi:solar-power",
ENERGY_KILO_WATT_HOUR,
),
]
)
if toon.thermostat_info.have_ot_boiler:
sensors.extend(
[
ToonBoilerDeviceSensor(
toon,
"thermostat_info",
"current_modulation_level",
"Boiler Modulation Level",
"mdi:percent",
RATIO_PERCENT,
)
]
)
async_add_entities(sensors, True)
class ToonSensor(ToonEntity):
"""Defines a Toon sensor."""
def __init__(
self,
toon: ToonData,
section: str,
measurement: str,
name: str,
icon: str,
unit_of_measurement: str,
) -> None:
"""Initialize the Toon sensor."""
self._state = None
self._unit_of_measurement = unit_of_measurement
self.section = section
self.measurement = measurement
super().__init__(toon, name, icon)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return "_".join(
[DOMAIN, self.toon.agreement.id, "sensor", self.section, self.measurement]
)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self) -> None:
"""Get the latest data from the sensor."""
section = getattr(self.toon, self.section)
value = None
if not section:
return
if self.section == "power" and self.measurement == "daily_value":
value = round(
(float(section.daily_usage) + float(section.daily_usage_low)) / 1000.0,
2,
)
if value is None:
value = getattr(section, self.measurement)
if self.section == "power" and self.measurement in [
"meter_reading",
"meter_reading_low",
"average_daily",
]:
value = round(float(value) / 1000.0, 2)
if self.section == "solar" and self.measurement in [
"meter_reading_produced",
"meter_reading_low_produced",
]:
value = float(value) / 1000.0
if self.section == "gas" and self.measurement in [
"average_daily",
"daily_usage",
"meter_reading",
]:
value = round(float(value) / 1000.0, 2)
self._state = max(0, value)
class ToonElectricityMeterDeviceSensor(ToonSensor, ToonElectricityMeterDeviceEntity):
"""Defines a Electricity Meter sensor."""
pass
class ToonGasMeterDeviceSensor(ToonSensor, ToonGasMeterDeviceEntity):
"""Defines a Gas Meter sensor."""
pass
class ToonSolarDeviceSensor(ToonSensor, ToonSolarDeviceEntity):
"""Defines a Solar sensor."""
pass
class ToonBoilerDeviceSensor(ToonSensor, ToonBoilerDeviceEntity):
"""Defines a Boiler sensor."""
pass
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test scenario for Gcp Http(s) Load Balancers.
# Standard python modules.
import copy
import json
import time
# citest modules.
import citest.gcp_testing as gcp
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
SCOPES = [gcp.COMPUTE_READ_WRITE_SCOPE]
GCE_URL_PREFIX = 'https://www.googleapis.com/compute/v1/projects/'
class GoogleHttpLoadBalancerTestScenario(sk.SpinnakerTestScenario):
'''Defines the tests for L7 Load Balancers.
'''
MINIMUM_PROJECT_QUOTA = {
'INSTANCE_TEMPLATES': 1,
'BACKEND_SERVICES': 3,
'URL_MAPS': 1,
'HEALTH_CHECKS': 1,
'IN_USE_ADDRESSES': 2,
'SSL_CERTIFICATES': 2,
'TARGET_HTTP_PROXIES': 1,
'TARGET_HTTPS_PROXIES': 1,
'FORWARDING_RULES': 2
}
MINIMUM_REGION_QUOTA = {
'CPUS': 2,
'IN_USE_ADDRESSES': 2,
'INSTANCE_GROUP_MANAGERS': 1,
'INSTANCES': 2,
}
@classmethod
def new_agent(cls, bindings):
'''Implements citest.service_testing.AgentTestScenario.new_agent.'''
agent = gate.new_agent(bindings)
agent.default_max_wait_secs = 1200
return agent
def __init__(self, bindings, agent=None):
'''Constructor.
Args:
bindings: [dict] The data bindings to use to configure the scenario.
agent: [GateAgent] The agent for invoking the test operations on Gate.
'''
super(GoogleHttpLoadBalancerTestScenario, self).__init__(bindings, agent)
bindings = self.bindings
self.__lb_detail = 'httplb'
self.TEST_APP = bindings['TEST_APP']
self.__lb_name = '{app}-{stack}-{detail}'.format(
app=bindings['TEST_APP'], stack=bindings['TEST_STACK'],
detail=self.__lb_detail)
self.__first_cert = 'first-cert-%s' % (bindings['TEST_APP'])
self.__proto_hc = {
'name': 'basic-' + self.TEST_APP,
'requestPath': '/',
'port': 80,
'checkIntervalSec': 2,
'timeoutSec': 1,
'healthyThreshold': 3,
'unhealthyThreshold': 4
}
self.__proto_delete = {
'type': 'deleteLoadBalancer',
'cloudProvider': 'gce',
'loadBalancerType': 'HTTP',
'loadBalancerName': self.__lb_name,
'region': bindings['TEST_GCE_REGION'],
'regions': [bindings['TEST_GCE_REGION']],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': '[anonymous]'
}
self.__proto_upsert = {
'cloudProvider': 'gce',
'provider': 'gce',
'stack': bindings['TEST_STACK'],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'region': bindings['TEST_GCE_REGION'],
'loadBalancerType': 'HTTP',
'loadBalancerName': self.__lb_name,
'urlMapName': self.__lb_name,
'listenersToDelete': [],
'portRange': '80',
'defaultService': {
'name': 'default-' + self.TEST_APP,
'backends': [],
'healthCheck': self.__proto_hc,
},
'certificate': self.__first_cert,
'hostRules': [
{
'hostPatterns': ['host1.com', 'host2.com'],
'pathMatcher': {
'pathRules': [
{
'paths': ['/path', '/path2/more'],
'backendService': {
'name': 'bs-' + self.TEST_APP,
'backends': [],
'healthCheck': self.__proto_hc,
}
}
],
'defaultService': {
'name': 'pm-' + self.TEST_APP,
'backends': [],
'healthCheck': self.__proto_hc,
}
}
}
],
'type': 'upsertLoadBalancer',
'availabilityZones': {bindings['TEST_GCE_REGION']: []},
'user': '[anonymous]'
}
def _get_bs_link(self, bs):
'''Make a fully-formatted backend service link.
'''
return (GCE_URL_PREFIX
+ self.bindings['GOOGLE_PRIMARY_MANAGED_PROJECT_ID']
+ '/global/backendServices/' + bs)
def _get_hc_link(self, hc):
'''Make a fully-formatted health check link.
'''
return (GCE_URL_PREFIX
+ self.bindings['GOOGLE_PRIMARY_MANAGED_PROJECT_ID']
+ '/global/httpHealthChecks/' + hc)
def _set_all_hcs(self, upsert, hc):
'''Set all health checks in upsert to hc.
'''
upsert['defaultService']['healthCheck'] = hc
for host_rule in upsert['hostRules']:
path_matcher = host_rule['pathMatcher']
path_matcher['defaultService']['healthCheck'] = hc
for path_rule in path_matcher['pathRules']:
path_rule['backendService']['healthCheck'] = hc
def _add_contract_clauses(self, contract_builder, upsert):
'''Add the proper predicates to the contract builder for a given
upsert description.
'''
host_rules = upsert['hostRules'] # Host rules will be distinct.
backend_services = [upsert['defaultService']]
for host_rule in host_rules:
path_matcher = host_rule['pathMatcher']
backend_services.append(path_matcher['defaultService'])
for path_rule in path_matcher['pathRules']:
backend_services.append(path_rule['backendService'])
health_checks = [service['healthCheck'] for service in backend_services]
hc_clause_builder = (contract_builder
.new_clause_builder('Health Checks Created',
retryable_for_secs=30)
.list_resource('httpHealthChecks'))
for hc in health_checks:
hc_clause_builder.contains_match({
'name': jp.STR_EQ(hc['name']),
'requestPath': jp.STR_EQ(hc['requestPath']),
'port': jp.NUM_EQ(hc['port'])})
bs_clause_builder = (contract_builder.
new_clause_builder('Backend Services Created',
retryable_for_secs=30).
list_resource('backendServices'))
for bs in backend_services:
bs_clause_builder.contains_match({
'name': jp.STR_EQ(bs['name']),
'portName': jp.STR_EQ('http'),
'healthChecks':
jp.LIST_MATCHES([
jp.STR_EQ(self._get_hc_link(bs['healthCheck']['name']))])
})
url_map_clause_builder = (contract_builder
.new_clause_builder('Url Map Created',
retryable_for_secs=30)
.list_resource('urlMaps'))
for hr in host_rules:
pm = hr['pathMatcher']
path_rules_spec = [
jp.DICT_MATCHES({
'service': jp.STR_EQ(
self._get_bs_link(pr['backendService']['name'])),
'paths':
jp.LIST_MATCHES([jp.STR_EQ(path) for path in pr['paths']])
})
for pr in pm['pathRules']]
path_matchers_spec = {
'defaultService':
jp.STR_EQ(self._get_bs_link(pm['defaultService']['name'])),
'pathRules': jp.LIST_MATCHES(path_rules_spec)
}
url_map_clause_builder.contains_match({
'name': jp.STR_EQ(self.__lb_name),
'defaultService':
jp.STR_EQ(self._get_bs_link(upsert['defaultService']['name'])),
'hostRules/hosts':
jp.LIST_MATCHES([jp.STR_SUBSTR(host)
for host in hr['hostPatterns']]),
'pathMatchers': jp.LIST_MATCHES([jp.DICT_MATCHES(path_matchers_spec)]),
})
port_string = '443-443'
if upsert['certificate'] == '':
port_string = '%s-%s' % (upsert['portRange'], upsert['portRange'])
(contract_builder.new_clause_builder('Forwarding Rule Created',
retryable_for_secs=30)
.list_resource('globalForwardingRules')
.contains_match({
'name': jp.STR_EQ(self.__lb_name),
'portRange': jp.STR_EQ(port_string)
}))
proxy_clause_builder = contract_builder.new_clause_builder(
'Target Proxy Created', retryable_for_secs=30)
self._add_proxy_clause(upsert['certificate'], proxy_clause_builder)
def _add_proxy_clause(self, certificate, proxy_clause_builder):
target_proxy_name = '%s-target-%s-proxy'
if certificate:
target_proxy_name = target_proxy_name % (self.__lb_name, 'https')
(proxy_clause_builder.list_resource('targetHttpsProxies')
.contains_path_eq('name', target_proxy_name))
else:
target_proxy_name = target_proxy_name % (self.__lb_name, 'http')
(proxy_clause_builder.list_resource('targetHttpProxies')
.contains_path_eq('name', target_proxy_name))
def upsert_full_load_balancer(self):
'''Upserts L7 LB with full hostRules, pathMatchers, etc.
Calls the upsertLoadBalancer operation with a payload, then verifies that
the expected resources are visible on GCP.
'''
hc = copy.deepcopy(self.__proto_hc)
hc['requestPath'] = '/'
hc['port'] = 80
upsert = copy.deepcopy(self.__proto_upsert)
self._set_all_hcs(upsert, hc)
payload = self.agent.make_json_payload_from_kwargs(
job=[upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, upsert)
return st.OperationContract(
self.new_post_operation(title='upsert full http lb',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def upsert_min_load_balancer(self):
'''Upserts a L7 LB with the minimum description.
'''
upsert = copy.deepcopy(self.__proto_upsert)
upsert['hostRules'] = []
upsert['certificate'] = '' # Test HTTP upsert, not HTTPS.
payload = self.agent.make_json_payload_from_kwargs(
job=[upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, upsert)
return st.OperationContract(
self.new_post_operation(title='upsert min http lb',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def delete_http_load_balancer(self):
'''Deletes the L7 LB.
'''
bindings = self.bindings
delete = copy.deepcopy(self.__proto_delete)
payload = self.agent.make_json_payload_from_kwargs(
job=[delete],
description='Delete L7 Load Balancer: {0} in {1}:{2}'.format(
self.__lb_name,
bindings['SPINNAKER_GOOGLE_ACCOUNT'],
bindings['TEST_GCE_REGION'],
),
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
(contract_builder.new_clause_builder('Health Check Removed',
retryable_for_secs=30)
.list_resource('httpHealthChecks')
.excludes_path_value('name', self.__proto_hc['name'])
)
(contract_builder.new_clause_builder('Url Map Removed',
retryable_for_secs=30)
.list_resource('urlMaps')
.excludes_path_value('name', self.__lb_name)
)
(contract_builder.new_clause_builder('Forwarding Rule Removed',
retryable_for_secs=30)
.list_resource('globalForwardingRules')
.excludes_path_value('name', self.__lb_name)
)
return st.OperationContract(
self.new_post_operation(
title='delete_http_load_balancer', data=payload, path='tasks'),
contract=contract_builder.build())
def change_health_check(self):
'''Changes the health check associated with the LB.
'''
upsert = copy.deepcopy(self.__proto_upsert)
hc = copy.deepcopy(self.__proto_hc)
hc['requestPath'] = '/changedPath'
hc['port'] = 8080
self._set_all_hcs(upsert, hc)
payload = self.agent.make_json_payload_from_kwargs(
job=[upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, upsert)
return st.OperationContract(
self.new_post_operation(title='change health checks',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def change_backend_service(self):
'''Changes the default backend service associated with the LB.
'''
hc = copy.deepcopy(self.__proto_hc)
bs_upsert = copy.deepcopy(self.__proto_upsert)
hc['name'] = 'updated-' + self.TEST_APP
hc['requestPath'] = '/changedPath1'
hc['port'] = 8080
bs_upsert['defaultService']['healthCheck'] = hc
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title='change backend services',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def add_host_rule(self):
'''Adds a host rule to the url map.
'''
bs_upsert = copy.deepcopy(self.__proto_upsert)
hr = copy.deepcopy(bs_upsert['hostRules'][0])
hr['hostPatterns'] = ['added.host1.com', 'added.host2.com']
hr['pathMatcher']['pathRules'][0]['paths'] = ['/added/path']
bs_upsert['hostRules'].append(hr)
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title='add host rule',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def update_host_rule(self):
    '''Updates a host rule in the url map.
'''
bs_upsert = copy.deepcopy(self.__proto_upsert)
hr = copy.deepcopy(bs_upsert['hostRules'][0])
hr['hostPatterns'] = ['updated.host1.com']
hr['pathMatcher']['pathRules'][0]['paths'] = ['/updated/path']
bs_upsert['hostRules'].append(hr)
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title='update host rule',
data=payload, path='tasks'),
contract=contract_builder.build()
)
def add_cert(self, certname, title):
'''Add cert to targetHttpProxy to make it a targetHttpsProxy.
'''
bs_upsert = copy.deepcopy(self.__proto_upsert)
bs_upsert['certificate'] = certname
payload = self.agent.make_json_payload_from_kwargs(
job=[bs_upsert],
description='Upsert L7 Load Balancer: ' + self.__lb_name,
application=self.TEST_APP
)
contract_builder = gcp.GcpContractBuilder(self.gcp_observer)
self._add_contract_clauses(contract_builder, bs_upsert)
return st.OperationContract(
self.new_post_operation(title=title,
data=payload, path='tasks'),
contract=contract_builder.build()
)
def add_security_group(self):
'''Associates a security group with the L7 load balancer.
'''
bindings = self.bindings
sec_group_payload = self.agent.make_json_payload_from_kwargs(
job=[
{
'allowed': [
{
'ipProtocol': 'tcp',
'portRanges': ['80-80']
},
{
'ipProtocol': 'tcp',
'portRanges': ['8080-8080']
},
{
'ipProtocol': 'tcp',
'portRanges': ['443-443']
}
],
'backingData': {'networks': ['default']},
'cloudProvider': 'gce',
'application': self.TEST_APP,
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'description': '',
'detail': 'http',
'ipIngress': [
{
'type': 'tcp',
'startPort': 80,
'endPort': 80,
},
{
'type': 'tcp',
'startPort': 8080,
'endPort': 8080,
},
{
'type': 'tcp',
'startPort': 443,
'endPort': 443,
}
],
'name': self.__lb_name + '-rule',
'network': 'default',
'region': 'global',
'securityGroupName': self.__lb_name + '-rule',
'sourceRanges': ['0.0.0.0/0'],
'targetTags': [self.__lb_name + '-tag'],
'type': 'upsertSecurityGroup',
'user': '[anonymous]'
}
],
description='Create a Security Group for L7 operations.',
application=self.TEST_APP
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Security Group Created',
retryable_for_secs=30)
.list_resource('firewalls')
.contains_path_value('name', self.__lb_name + '-rule'))
return st.OperationContract(
self.new_post_operation(title='create security group',
data=sec_group_payload, path='tasks'),
contract=builder.build()
)
def delete_security_group(self):
'''Deletes a security group.
'''
bindings = self.bindings
sec_group_payload = self.agent.make_json_payload_from_kwargs(
job=[
{
'cloudProvider': 'gce',
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'regions': ['global'],
'securityGroupName': self.__lb_name + '-rule',
'type': 'deleteSecurityGroup',
'user': '[anonymous]'
}
],
description='Delete a Security Group.',
application=self.TEST_APP
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Security Group Deleted',
retryable_for_secs=30)
.list_resource('firewalls')
.excludes_path_value('name', self.__lb_name + '-rule'))
return st.OperationContract(
self.new_post_operation(title='delete security group',
data=sec_group_payload, path='tasks'),
contract=builder.build()
)
def add_server_group(self):
'''Adds a server group to the L7 LB.
'''
time.sleep(60) # Wait for the L7 LB to be ready.
bindings = self.bindings
group_name = '{app}-{stack}-v000'.format(app=self.TEST_APP,
stack=bindings['TEST_STACK'])
policy = {
'balancingMode': 'UTILIZATION',
'listeningPort': 80,
'maxUtilization': 0.8,
'capacityScaler': 0.8
}
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'gce',
'application': self.TEST_APP,
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'strategy':'',
'capacity': {'min':1, 'max':1, 'desired':1},
'targetSize': 1,
'image': bindings['TEST_GCE_IMAGE_NAME'],
'zone': bindings['TEST_GCE_ZONE'],
'stack': bindings['TEST_STACK'],
'instanceType': 'f1-micro',
'type': 'createServerGroup',
'tags': [self.__lb_name + '-tag'],
'loadBalancers': [self.__lb_name],
'backendServices': {self.__lb_name: ['bs-' + self.TEST_APP]},
'disableTraffic': False,
'loadBalancingPolicy': {
'balancingMode': 'UTILIZATION',
'listeningPort': 80,
'maxUtilization': 0.8,
'capacityScaler': 0.8
},
'availabilityZones': {
bindings['TEST_GCE_REGION']: [bindings['TEST_GCE_ZONE']]
},
'instanceMetadata': {
'startup-script': ('sudo apt-get update'
' && sudo apt-get install apache2 -y'),
'global-load-balancer-names': self.__lb_name,
'backend-service-names': 'bs-' + self.TEST_APP,
'load-balancing-policy': json.dumps(policy)
},
'account': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'authScopes': ['compute'],
'user': '[anonymous]'
}],
description='Create Server Group in ' + group_name,
application=self.TEST_APP
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Managed Instance Group Added',
retryable_for_secs=30)
.inspect_resource('instanceGroupManagers', group_name)
.contains_path_eq('targetSize', 1)
)
return st.OperationContract(
self.new_post_operation(title='create server group',
data=payload, path='tasks'),
contract=builder.build()
)
def delete_server_group(self):
"""Creates OperationContract for deleteServerGroup.
To verify the operation, we just check that the GCP managed instance group
is no longer visible on GCP (or is in the process of terminating).
"""
bindings = self.bindings
group_name = '{app}-{stack}-v000'.format(
app=self.TEST_APP, stack=bindings['TEST_STACK'])
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'cloudProvider': 'gce',
'serverGroupName': group_name,
'region': bindings['TEST_GCE_REGION'],
'zone': bindings['TEST_GCE_ZONE'],
'type': 'destroyServerGroup',
'regions': [bindings['TEST_GCE_REGION']],
'zones': [bindings['TEST_GCE_ZONE']],
'credentials': bindings['SPINNAKER_GOOGLE_ACCOUNT'],
'user': '[anonymous]'
}],
application=self.TEST_APP,
description='DestroyServerGroup: ' + group_name
)
builder = gcp.GcpContractBuilder(self.gcp_observer)
(builder.new_clause_builder('Managed Instance Group Removed')
.inspect_resource('instanceGroupManagers', group_name,
no_resource_ok=True)
.contains_path_eq('targetSize', 0))
(builder.new_clause_builder('Instances Are Removed',
retryable_for_secs=30)
.list_resource('instances')
.excludes_path_value('name', group_name))
return st.OperationContract(
self.new_post_operation(
title='delete server group', data=payload, path='tasks'),
contract=builder.build()
)
|
|
"""
Load npy xy, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font', family = 'serif', serif = 'cmr10')
import numpy as np
from datetime import timedelta
import datetime
import imp
import re
from textwrap import wrap
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
#unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py')
###############
# Things to change
top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal'
pp_file = 'avg.5216'
lon_max = 71
lon_min = 67
lat_max= 28
lat_min=20
trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/'
trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s_bit_above_western_ghats.npz" % (lat_min,lat_max, lon_min, lon_max)
#############
# Make own time x-axis
d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1))
formatter = matplotlib.dates.DateFormatter('%H:%M')
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
experiment_ids_p = [ 'dkjxq', 'djznq' ] # Most of Params
experiment_ids_e = ['dkhgu', 'dkbhu'] # Most of Explicit
#experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#plt.ion()
NUM_COLOURS = 15
cmap=cm.get_cmap(cm.Set1, NUM_COLOURS)
#cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))
for ls in ['land', 'sea', 'total']:
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
legendEntries=[]
legendtext=[]
plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
dates_trmm=[]
p=[]
for dp in plot_trmm['hour']:
print dp
if ((int(dp)<23) & (int(dp)>=6)):
dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
if ((int(dp)>=0) & (int(dp)<=6)):
dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
p.append(plot_trmm['mean'][plot_trmm['hour']==dp])
#print dates_trmm
a = np.argsort(dates_trmm,axis=0)
d_trmm = np.array(dates_trmm)[a]
pl = (np.array(p)[a])
#pl=np.sort(pl,axis=1)
l, = plt.plot_date(d_trmm, pl, label='TRMM', linewidth=2, linestyle='-', marker='', markersize=2, fmt='', color='#262626')
legendEntries.append(l)
legendtext.append('TRMM')
l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1))
# Change the legend label colors to almost black
texts = l0.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
for c, experiment_id in enumerate(experiment_ids_p):
expmin1 = experiment_id[:-1]
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':8}, bbox_to_anchor=(0, 0,1, 1))
# Change the legend label colors to almost black
texts = l1.texts
for t in texts:
t.set_color('#262626')
legendEntries=[]
legendtext=[]
c1=0
for c, experiment_id in enumerate(experiment_ids_e):
if (experiment_id=='djznw'):
print experiment_id
colour = cmap(1.*1/NUM_COLOURS)
linewidth=0.2
linestylez='--'
if (experiment_id=='djzny'):
print experiment_id
colour = cmap(1.*3/NUM_COLOURS)
linewidth=0.5
linestylez='--'
if ((experiment_id=='djznq') or (experiment_id=='dkjxq')):
print experiment_id
colour = cmap(1.*5/NUM_COLOURS)
linewidth=0.8
if (experiment_id=='djznq'):
linestylez='--'
if (experiment_id=='dkjxq'):
linestylez=':'
if ((experiment_id=='dklzq') or (experiment_id=='dklwu')):
print experiment_id
colour = cmap(1.*7/NUM_COLOURS)
linewidth=1
if (experiment_id=='dklzq'):
linestylez='--'
if (experiment_id=='dklwu'):
linestylez='-'
if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')):
print experiment_id
colour = cmap(1.*9/NUM_COLOURS)
linewidth=1.3
if (experiment_id=='dkmbq'):
linestylez='--'
if (experiment_id=='dklyu'):
linestylez='-'
if (experiment_id=='djzns'):
print experiment_id
colour = cmap(1.*11/NUM_COLOURS)
linewidth=1.6
linestylez='-'
if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')):
print experiment_id
colour = cmap(1.*13/NUM_COLOURS)
linewidth=1.9
if (experiment_id=='dkbhu'):
linestylez='-'
if (experiment_id=='dkhgu'):
linestylez=':'
if (experiment_id=='djznu'):
print experiment_id
colour = cmap(1.*15/NUM_COLOURS)
linewidth=2.
linestylez='-'
expmin1 = experiment_id[:-1]
try:
plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max))
l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour)
legendEntries.append(l)
legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
except Exception, e:
print e
pass
l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.11, 0,1, 1), prop={'size':8})
plt.gca().add_artist(l1)
plt.gca().add_artist(l0)
plt.gca().xaxis.set_major_formatter(formatter)
# Change the legend label colors to almost black
texts = l2.texts
for t in texts:
t.set_color('#262626')
plt.xlabel('Time (UTC)')
plt.ylabel('mm/h')
title="Domain Averaged Rainfall - %s" % ls
t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL)
t = re.sub(r'[(\']', ' ', t)
t = re.sub(r'[\',)]', ' ', t)
pp_filenodot= pp_file.replace(".", "")
# Bit of formatting
# Set colour of axis lines
spines_to_keep = ['bottom', 'left']
for spine in spines_to_keep:
ax.spines[spine].set_linewidth(0.5)
ax.spines[spine].set_color('#262626')
# Remove top and right axes lines ("spines")
spines_to_remove = ['top', 'right']
for spine in spines_to_remove:
ax.spines[spine].set_visible(False)
# Get rid of ticks. The position of the numbers is informative enough of
# the position of the value.
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
# Change the labels to the off-black
ax.xaxis.label.set_color('#262626')
ax.yaxis.label.set_color('#262626')
if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/')
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_bit_above_western_ghats_notitle_largeonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_bit_above_western_ghats_largeonly.png' % (pp_filenodot, ls), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
|
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=W0621 # redefined-outer-name
import hashlib
import logging
import os
import subprocess
import sys
from typing import Any, Dict, List, TextIO
import pexpect
import pytest
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
from pygdbmi.gdbcontroller import GdbController, GdbTimeoutError, NoGdbProcessError
from pytest_embedded_idf.app import IdfApp
from pytest_embedded_idf.dut import IdfDut
from pytest_embedded_idf.serial import IdfSerial
def sha256(file: str) -> str:
res = hashlib.sha256()
with open(file, 'rb') as fr:
res.update(fr.read())
return res.hexdigest()
class PanicTestDut(IdfDut):
BOOT_CMD_ADDR = 0x9000
BOOT_CMD_SIZE = 0x1000
DEFAULT_EXPECT_TIMEOUT = 10
COREDUMP_UART_START = '================= CORE DUMP START ================='
COREDUMP_UART_END = '================= CORE DUMP END ================='
app: IdfApp
serial: IdfSerial
def __init__(self, *args, **kwargs) -> None: # type: ignore
super().__init__(*args, **kwargs)
self.gdb: GdbController = None # type: ignore
        # record the current root log level so it can be restored later via revert_log_level()
        self.log_level = logging.getLogger().level
        # pygdbmi uses logging.debug to emit single-character noise, so temporarily raise the level to INFO
if self.log_level <= logging.DEBUG:
logging.getLogger().setLevel(logging.INFO)
self.coredump_output: TextIO = None # type: ignore
def close(self) -> None:
if self.gdb:
self.gdb.exit()
super().close()
def revert_log_level(self) -> None:
logging.getLogger().setLevel(self.log_level)
def expect_test_func_name(self, test_func_name: str) -> None:
self.expect_exact('Enter test name:')
self.write(test_func_name)
self.expect_exact('Got test name: ' + test_func_name)
def expect_none(self, pattern, **kwargs) -> None: # type: ignore
"""like dut.expect_all, but with an inverse logic"""
if 'timeout' not in kwargs:
kwargs['timeout'] = 1
try:
res = self.expect(pattern, **kwargs)
raise AssertionError(f'Unexpected: {res.group().decode("utf8")}')
except pexpect.TIMEOUT:
pass
def expect_backtrace(self) -> None:
self.expect_exact('Backtrace:')
self.expect_none('CORRUPTED')
def expect_gme(self, reason: str) -> None:
"""Expect method for Guru Meditation Errors"""
self.expect_exact(f"Guru Meditation Error: Core 0 panic'ed ({reason})")
def expect_reg_dump(self, core: int = 0) -> None:
"""Expect method for the register dump"""
self.expect(r'Core\s+%d register dump:' % core)
def expect_elf_sha256(self) -> None:
"""Expect method for ELF SHA256 line"""
elf_sha256 = sha256(self.app.elf_file)
elf_sha256_len = int(
self.app.sdkconfig.get('CONFIG_APP_RETRIEVE_LEN_ELF_SHA', '16')
)
self.expect_exact('ELF file SHA256: ' + elf_sha256[0:elf_sha256_len])
def _call_espcoredump(
self, extra_args: List[str], coredump_file_name: str, output_file_name: str
) -> None:
# no "with" here, since we need the file to be open for later inspection by the test case
if not self.coredump_output:
self.coredump_output = open(output_file_name, 'w')
espcoredump_script = os.path.join(
os.environ['IDF_PATH'], 'components', 'espcoredump', 'espcoredump.py'
)
espcoredump_args = [
sys.executable,
espcoredump_script,
'info_corefile',
'--core',
coredump_file_name,
]
espcoredump_args += extra_args
espcoredump_args.append(self.app.elf_file)
logging.info('Running %s', ' '.join(espcoredump_args))
logging.info('espcoredump output is written to %s', self.coredump_output.name)
subprocess.check_call(espcoredump_args, stdout=self.coredump_output)
self.coredump_output.flush()
self.coredump_output.seek(0)
def process_coredump_uart(self) -> None:
"""Extract the core dump from UART output of the test, run espcoredump on it"""
self.expect(self.COREDUMP_UART_START)
res = self.expect('(.+)' + self.COREDUMP_UART_END)
coredump_base64 = res.group(1).decode('utf8')
with open(os.path.join(self.logdir, 'coredump_data.b64'), 'w') as coredump_file:
logging.info('Writing UART base64 core dump to %s', coredump_file.name)
coredump_file.write(coredump_base64)
output_file_name = os.path.join(self.logdir, 'coredump_uart_result.txt')
self._call_espcoredump(
['--core-format', 'b64'], coredump_file.name, output_file_name
)
def process_coredump_flash(self) -> None:
"""Extract the core dump from flash, run espcoredump on it"""
coredump_file_name = os.path.join(self.logdir, 'coredump_data.bin')
logging.info('Writing flash binary core dump to %s', coredump_file_name)
self.serial.dump_flash(coredump_file_name, partition='coredump')
output_file_name = os.path.join(self.logdir, 'coredump_flash_result.txt')
self._call_espcoredump(
['--core-format', 'raw'], coredump_file_name, output_file_name
)
def gdb_write(self, command: str) -> Any:
"""
Wrapper to write to gdb with a longer timeout, as test runner
host can be slow sometimes
"""
return self.gdb.write(command, timeout_sec=10)
def start_gdb(self) -> None:
"""
Runs GDB and connects it to the "serial" port of the DUT.
After this, the DUT expect methods can no longer be used to capture output.
"""
self.gdb = GdbController(gdb_path=self.toolchain_prefix + 'gdb')
# pygdbmi logs to console by default, make it log to a file instead
pygdbmi_log_file_name = os.path.join(self.logdir, 'pygdbmi_log.txt')
pygdbmi_logger = self.gdb.logger
pygdbmi_logger.setLevel(logging.DEBUG)
while pygdbmi_logger.hasHandlers():
pygdbmi_logger.removeHandler(pygdbmi_logger.handlers[0])
log_handler = logging.FileHandler(pygdbmi_log_file_name)
log_handler.setFormatter(
logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
)
pygdbmi_logger.addHandler(log_handler)
logging.info('Running command: %s', self.gdb.get_subprocess_cmd())
for _ in range(10):
try:
# GdbController creates a process with subprocess.Popen(). Is it really running? It is probable that
# an RPI under high load will get non-responsive during creating a lot of processes.
resp = self.gdb.get_gdb_response(
timeout_sec=10
) # calls verify_valid_gdb_subprocess() internally
# it will be interesting to look up this response if the next GDB command fails (times out)
logging.info('GDB response: %s', resp)
break # success
except GdbTimeoutError:
logging.warning(
'GDB internal error: cannot get response from the subprocess'
)
except NoGdbProcessError:
logging.error('GDB internal error: process is not running')
break # failure - TODO: create another GdbController
except ValueError:
logging.error(
'GDB internal error: select() returned an unexpected file number'
)
# Set up logging for GDB remote protocol
gdb_remotelog_file_name = os.path.join(self.logdir, 'gdb_remote_log.txt')
self.gdb_write('-gdb-set remotelogfile ' + gdb_remotelog_file_name)
# Load the ELF file
self.gdb_write('-file-exec-and-symbols {}'.format(self.app.elf_file))
# Connect GDB to UART
self.serial.proc.close()
logging.info('Connecting to GDB Stub...')
self.gdb_write('-gdb-set serial baud 115200')
responses = self.gdb_write('-target-select remote ' + self.serial.port)
# Make sure we get the 'stopped' notification
stop_response = self.find_gdb_response('stopped', 'notify', responses)
if not stop_response:
responses = self.gdb_write('-exec-interrupt')
stop_response = self.find_gdb_response('stopped', 'notify', responses)
assert stop_response
frame = stop_response['payload']['frame']
if 'file' not in frame:
frame['file'] = '?'
if 'line' not in frame:
frame['line'] = '?'
logging.info('Stopped in {func} at {addr} ({file}:{line})'.format(**frame))
# Drain remaining responses
self.gdb.get_gdb_response(raise_error_on_timeout=False)
def gdb_backtrace(self) -> Any:
"""
Returns the list of stack frames for the current thread.
Each frame is a dictionary, refer to pygdbmi docs for the format.
"""
assert self.gdb
responses = self.gdb_write('-stack-list-frames')
return self.find_gdb_response('done', 'result', responses)['payload']['stack']
@staticmethod
def match_backtrace(
gdb_backtrace: List[Any], expected_functions_list: List[Any]
) -> bool:
"""
Returns True if the function names listed in expected_functions_list match the backtrace
given by gdb_backtrace argument. The latter is in the same format as returned by gdb_backtrace()
function.
"""
return all(
[
frame['func'] == expected_functions_list[i]
for i, frame in enumerate(gdb_backtrace)
]
)
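    # Illustrative example (added comment): with gdb_backtrace ==
    # [{'func': 'panic_handler'}, {'func': 'app_main'}] and expected_functions_list ==
    # ['panic_handler', 'app_main'], match_backtrace returns True. The expected list
    # must be at least as long as the backtrace, otherwise the index lookup raises
    # IndexError.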
@staticmethod
def find_gdb_response(
message: str, response_type: str, responses: List[Any]
) -> Any:
"""
Helper function which extracts one response from an array of GDB responses, filtering
by message and type. Returned message is a dictionary, refer to pygdbmi docs for the format.
"""
def match_response(response: Dict[str, Any]) -> bool:
return response['message'] == message and response['type'] == response_type # type: ignore
filtered_responses = [r for r in responses if match_response(r)]
if not filtered_responses:
return None
return filtered_responses[0]
@pytest.fixture(scope='module')
def monkeypatch_module(request: FixtureRequest) -> MonkeyPatch:
mp = MonkeyPatch()
request.addfinalizer(mp.undo)
return mp
@pytest.fixture(scope='module', autouse=True)
def replace_dut_class(monkeypatch_module: MonkeyPatch) -> None:
monkeypatch_module.setattr('pytest_embedded_idf.dut.IdfDut', PanicTestDut)
|
|
import os
import os.path
import cherrypy
from cherrypy.lib import static
from cherrypy.lib.static import serve_file
import shutil
import tempfile
import glob
import random
from clarityviz import claritybase
from clarityviz import densitygraph as dg
from clarityviz import atlasregiongraph as arg
import networkx as nx
import plotly
# import matplotlib
# import matplotlib.pyplot as plt
from ndreg import *
import ndio.remote.neurodata as neurodata
import nibabel as nb
from numpy import genfromtxt
localDir = os.path.dirname(__file__)
absDir = os.path.join(os.getcwd(), localDir)
# print absDir
def imgGet(inToken):
refToken = "ara_ccf2" # hardcoded 'ara_ccf2' atlas until additional functionality is requested
refImg = imgDownload(refToken) # download atlas
refAnnoImg = imgDownload(refToken, channel="annotation")
print "reference token/atlas obtained"
inImg = imgDownload(inToken, resolution=5) # store downsampled level 5 brain to memory
(values, bins) = np.histogram(sitk.GetArrayFromImage(inImg), bins=100, range=(0,500))
print "level 5 brain obtained"
counts = np.bincount(values)
maximum = np.argmax(counts)
tupleResolution = inImg.GetSpacing(); # spacing here used to scale images to mm size (anisotropic resolution)
# EG: for Aut1367, the spacing is (0.01872, 0.01872, 0.005).
xResolution = tupleResolution[0]
yResolution = tupleResolution[1]
zResolution = tupleResolution[2]
# Now, to get the mm image size, we can multiply all x, y, z
# to get the proper mm size when plotting.
lowerThreshold = maximum
upperThreshold = sitk.GetArrayFromImage(inImg).max()+1
inImg = sitk.Threshold(inImg,lowerThreshold,upperThreshold,lowerThreshold) - lowerThreshold
print "applied filtering"
rawImg = sitk.GetArrayFromImage(inImg)
xdimensions = len(rawImg[:,0,0])
ydimensions = len(rawImg[0,:,0])
zdimensions = len(rawImg[0,0,:])
xyz = []
for i in range(40000):
value = 0
while(value == 0):
xval = random.sample(xrange(0,xdimensions), 1)[0]
yval = random.sample(xrange(0,ydimensions), 1)[0]
zval = random.sample(xrange(0,zdimensions), 1)[0]
value = rawImg[xval,yval,zval]
if [xval, yval, zval] not in xyz and value > 300:
xyz.append([xval, yval, zval])
else:
value = 0
rImg = claritybase.claritybase(inToken + 'raw', None)
rImg.savePoints(None,xyz)
rImg.generate_plotly_html()
print "random sample of points above 250"
spacingImg = inImg.GetSpacing()
spacing = tuple(i * 50 for i in spacingImg)
inImg.SetSpacing(spacingImg)
inImg_download = inImg # Aut1367 set to default spacing
inImg = imgResample(inImg, spacing=refImg.GetSpacing())
print "resampled img"
Img_reorient = imgReorient(inImg, "LPS", "RSA") # reoriented Aut1367
refImg_ds = imgResample(refImg, spacing=spacing) # atlas with downsampled spacing 10x
inImg_ds = imgResample(Img_reorient, spacing=spacing) # Aut1367 with downsampled spacing 10x
print "reoriented image"
affine = imgAffineComposite(inImg_ds, refImg_ds, iterations=100, useMI=True, verbose=True)
inImg_affine = imgApplyAffine(Img_reorient, affine, size=refImg.GetSize())
print "affine"
inImg_ds = imgResample(inImg_affine, spacing=spacing)
(field, invField) = imgMetamorphosisComposite(inImg_ds, refImg_ds, alphaList=[0.05, 0.02, 0.01], useMI=True, iterations=100, verbose=True)
inImg_lddmm = imgApplyField(inImg_affine, field, size=refImg.GetSize())
print "downsampled image"
invAffine = affineInverse(affine)
invAffineField = affineToField(invAffine, refImg.GetSize(), refImg.GetSpacing())
invField = fieldApplyField(invAffineField, invField)
inAnnoImg = imgApplyField(refAnnoImg, invField,useNearest=True, size=Img_reorient.GetSize())
inAnnoImg = imgReorient(inAnnoImg, "RSA", "LPS")
inAnnoImg = imgResample(inAnnoImg, spacing=inImg_download.GetSpacing(), size=inImg_download.GetSize(), useNearest=True)
print "inverse affine"
imgName = inToken + "reorient_atlas"
location = "img/" + imgName + ".nii"
imgWrite(inAnnoImg, str(location))
# ndImg = sitk.GetArrayFromImage(inAnnoImg)
# sitk.WriteImage(inAnnoImg, location)
print "generated output"
print imgName
return imgName
def image_parse(inToken):
imgName = imgGet(inToken)
# imgName = inToken + 'reorient_atlas'
copydir = os.path.join(os.getcwd(), os.path.dirname('img/'))
img = claritybase.claritybase(imgName, copydir) # initial call for clarityviz
print "loaded into claritybase"
img.loadEqImg()
print "loaded image"
img.applyLocalEq()
print "local histogram equalization"
img.loadGeneratedNii()
print "loaded generated nii"
img.calculatePoints(threshold = 0.9, sample = 0.05)
print "calculating points"
img.brightPoints(None,40000)
print "saving brightest points to csv"
# img.savePoints()
img.generate_plotly_html()
print "generating plotly"
img.plot3d()
print "generating nodes and edges list"
img.graphmlconvert()
print "generating graphml"
img.get_brain_figure(None, imgName + ' edgecount')
print "generating density graph"
return imgName
def density_graph(Token):
densg = dg.densitygraph(Token)
print 'densitygraph module'
densg.generate_density_graph()
print 'generated density graph'
g = nx.read_graphml(Token + '/' + Token + '.graphml')
ggraph = densg.get_brain_figure(g = g, plot_title=Token)
plotly.offline.plot(ggraph, filename = Token + '/' + Token + '_brain_figure.html')
hm = densg.generate_heat_map()
plotly.offline.plot(hm, filename = Token + '/' + Token + '_brain_heatmap.html')
def atlas_region(Token):
atlas_img = Token + '/' + Token + 'localeq' + '.nii'
atlas = nb.load(atlas_img) # <- atlas .nii image
atlas_data = atlas.get_data()
csvfile = Token + '/' + Token + 'localeq.csv' # 'atlasexp/Control258localeq.csv' # <- regular csv from the .nii to csv step
bright_points = genfromtxt(csvfile, delimiter=',')
locations = bright_points[:, 0:3]
regions = [atlas_data[l[0], l[1], l[2]] for l in locations]
outfile = open(Token + '/' + Token + '.region.csv', 'w')
infile = open(csvfile, 'r')
for i, line in enumerate(infile):
line = line.strip().split(',')
outfile.write(",".join(line) + "," + str(regions[i]) + "\n") # adding a 5th column to the original csv indicating its region (integer)
infile.close()
outfile.close()
print len(regions)
print regions[0:10]
uniq = list(set(regions))
numRegions = len(uniq)
print len(uniq)
print uniq
newToken = Token + '.region'
atlas = arg.atlasregiongraph(newToken, Token)
atlas.generate_atlas_region_graph(None, numRegions)
class FileDemo(object):
@cherrypy.expose
def index(self, directory="."):
img = []
for filename in glob.glob('img/*'):
img.append(filename)
html = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>ClarityViz</title>
<!-- Bootstrap Core CSS -->
<link href="static/vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="static/vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>
<!-- Plugin CSS -->
<link href="static/vendor/magnific-popup/magnific-popup.css" rel="stylesheet">
<!-- Theme CSS -->
<link href="static/css/creative.min.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="static/css/style.css" rel="stylesheet">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body id="page-top">
<nav id="mainNav" class="navbar navbar-default navbar-fixed-top">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1">
<span class="sr-only">Toggle navigation</span> Menu <i class="fa fa-bars"></i>
</button>
<a class="navbar-brand page-scroll" href="#page-top">ClarityViz</a>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav navbar-right">
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#Project">Project Description</a>
</li>
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#Graph">Graph Explanations</a>
</li>
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#About">About Us</a>
</li>
<li>
<a class="page-scroll" href="https://neurodatadesign.github.io/seelviz/#Acknowledgments">Acknowledgements</a>
</li>
</ul>
</div>
<!-- /.navbar-collapse -->
</div>
<!-- /.container-fluid -->
</nav>
<header>
<div class="header-content">
<div class="header-content-inner">
<h1 id="homeHeading">Select File</h1>
<hr>
<!-- Columns start at 50% wide on mobile and bump up to 33.3% wide on desktop -->
<div class="row">
<div class="col-xs-6 col-md-4"></div>
<div class="col-xs-6 col-md-4">
<form action="upload" method="post" enctype="multipart/form-data">
<div class="form-group">
<label for="myFile">Upload a File</label>
<div class="center-block"></div>
<input type="file" class="form-control" id="myFile" name="myFile">
<!-- <p class="help-block">Example block-level help text here.</p> -->
</div>
<!-- filename: <input type="file" name="myFile" /><br /> -->
<input class="btn btn-default" type="submit" value="Submit">
</form>
<h2>OR</h2>
<form action="neurodata" method="post" enctype="multipart/form-data">
<div class="form-group">
<label for="myToken">Submit Token</label>
<input type="text" class="form-control" id="myToken" name="myToken" placeholder="Token">
</div>
<!-- Token name: <input type="text" name="myToken"/><br /> -->
<input class="btn btn-default" type="submit" value="Submit">
</form>
</div>
<div class="col-xs-6 col-md-4"></div>
</div>
</div>
</div>
</header>
<section class="bg-primary" id="about">
<div class="container">
<div class="row">
<div class="col-lg-8 col-lg-offset-2 text-center">
<h2 class="section-heading">We've got what you need!</h2>
<hr class="light">
<p class="text-faded">Start Bootstrap has everything you need to get your new website up and running in no time! All of the templates and themes on Start Bootstrap are open source, free to download, and easy to use. No strings attached!</p>
<a href="#services" class="page-scroll btn btn-default btn-xl sr-button">Get Started!</a>
</div>
</div>
</div>
</section>
<section id="contact">
<div class="container">
<div class="row">
<div class="col-lg-8 col-lg-offset-2 text-center">
<h2 class="section-heading">Acknowledgements</h2>
<hr class="primary">
<p>Ready to start your next project with us? That's great! Give us a call or send us an email and we will get back to you as soon as possible!</p>
</div>
<div class="col-lg-4 col-lg-offset-2 text-center">
<i class="fa fa-phone fa-3x sr-contact"></i>
<p>123-456-6789</p>
</div>
<div class="col-lg-4 text-center">
<i class="fa fa-envelope-o fa-3x sr-contact"></i>
<p><a href="mailto:[email protected]">[email protected]</a></p>
</div>
</div>
</div>
</section>
<!-- jQuery -->
<script src="vendor/jquery/jquery.min.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="vendor/bootstrap/js/bootstrap.min.js"></script>
<!-- Plugin JavaScript -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-easing/1.3/jquery.easing.min.js"></script>
<script src="vendor/scrollreveal/scrollreveal.min.js"></script>
<script src="vendor/magnific-popup/jquery.magnific-popup.min.js"></script>
<!-- Theme JavaScript -->
<script src="js/creative.min.js"></script>
</body>
</html>
"""
return html
@cherrypy.expose
def neurodata(self, myToken):
myToken = image_parse(myToken)
density_graph(myToken)
atlas_region(myToken)
fzip = shutil.make_archive(myToken, 'zip', myToken)
fzip_abs = os.path.abspath(fzip)
html = """
<html><body>
<h2>Outputs</h2>
"""
plotly = []
for filename in glob.glob(myToken + '/*'):
absPath = os.path.abspath(filename)
if os.path.isdir(absPath):
link = '<a href="/index?directory=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
else:
if filename.endswith('html'):
plotly.append(filename)
link = '<a href="/download/?filepath=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
for plot in plotly:
absPath = os.path.abspath(plot)
html += """
<form action="plotly" method="get">
<input type="text" value=""" + '"' + absPath + '" name="plot" ' + """/>
<button type="submit">View """ + os.path.basename(plot) + """</button>
</form>"""
# html += '<a href="file:///' + '//' + absPath + '">' + "View Plotly graph</a> <br />"
html += '<a href="/download/?filepath=' + fzip_abs + '">' + myToken + '.zip' + "</a> <br />"
html += """</body></html>"""
return html
@cherrypy.expose
def upload(self, myFile):
copy = 'local/' + myFile.filename
print copy
token = '.'.join(myFile.filename.split('.')[:-1])  # strip the extension; token must be a string, not a list
with open(copy, 'wb') as fcopy:
while True:
data = myFile.file.read(8192)
if not data:
break
fcopy.write(data)
copydir = os.path.join(os.getcwd(), os.path.dirname('local/'))
print copydir
csv = claritybase.claritybase(token, copydir)
csv.loadInitCsv(copydir + '/' + myFile.filename)
csv.plot3d()
csv.savePoints()
csv.generate_plotly_html()
csv.graphmlconvert()
fzip = shutil.make_archive(token, 'zip', token)
fzip_abs = os.path.abspath(fzip)
html = """
<html><body>
<h2>Outputs</h2>
"""
plotly = []
for filename in glob.glob(token + '/*'):
absPath = os.path.abspath(filename)
if os.path.isdir(absPath):
link = '<a href="/index?directory=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
else:
if filename.endswith('html'):
plotly.append(filename)
link = '<a href="/download/?filepath=' + absPath + '">' + os.path.basename(filename) + "</a> <br />"
html += link
for plot in plotly:
absPath = os.path.abspath(plot)
html += """
<form action="plotly" method="get">
<input type="text" value=""" + '"' + absPath + '" name="plot" ' + """/>
<button type="submit">View """ + os.path.basename(plot) + """</button>
</form>"""
# html += '<a href="file:///' + '//' + absPath + '">' + "View Plotly graph</a> <br />"
html += '<a href="/download/?filepath=' + fzip_abs + '">' + token + '.zip' + "</a> <br />"
html += """</body></html>"""
return html
@cherrypy.expose
def plotly(self, plot="test/testplotly.html"):
return open(plot)
class Download:
@cherrypy.expose
def index(self, filepath):
return serve_file(filepath, "application/x-download", "attachment")
tutconf = os.path.join(os.path.dirname('/usr/local/lib/python2.7/dist-packages/cherrypy/tutorial/'), 'tutorial.conf')
# print tutconf
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to index().
current_dir = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
config = {
'global': {
'environment': 'production',
'log.screen': True,
'server.socket_host': '0.0.0.0',
'server.socket_port': 80,
'server.thread_pool': 10,
'engine.autoreload_on': True,
'engine.timeout_monitor.on': False,
'log.error_file': os.path.join(current_dir, 'errors.log'),
'log.access_file': os.path.join(current_dir, 'access.log'),
},
'/':{
'tools.staticdir.root' : current_dir,
},
'/static':{
'tools.staticdir.on' : True,
'tools.staticdir.dir' : 'static',
'staticFilter.on': True,
'staticFilter.dir': '/home/Tony/static'
},
}
root = FileDemo()
root.download = Download()
cherrypy.tree.mount(root)
cherrypy.quickstart(root, '/', config=config)
# cherrypy.quickstart(root, config=tutconf)
|
|
# -*- Mode: Python -*-
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
__revision__ = "$Id: select_trigger.py,v 1.4 2003/01/09 15:49:15 akuchling Exp $"
import asyncore_25 as asyncore
import asynchat_25 as asynchat
import os
import socket
import string
import thread
if os.name == 'posix':
class trigger (asyncore.file_dispatcher):
"Wake up a call to select() running in the main thread"
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]
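# A minimal usage sketch (illustrative only; 'some_channel' and 'some_data'
# are hypothetical placeholders, not names from this module):
#
#     t = trigger()
#
#     def deliver():
#         # runs later, inside the thread that drives asyncore.loop()
#         some_channel.push(some_data)
#
#     # called from a worker thread:
#     t.pull_trigger(deliver)
#
# Writing to the pipe (or to the loopback socket on win32) wakes up the
# pending select() call, and handle_read() then runs the queued thunks in
# the main thread.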
def __init__ (self):
r, w = self._fds = os.pipe()
self.trigger = w
asyncore.file_dispatcher.__init__(self, r)
self.lock = thread.allocate_lock()
self.thunks = []
self._closed = 0
# Override the asyncore close() method, because it seems that
# it would only close the r file descriptor and not w. The
# constructor calls file_dispatcher.__init__ and passes r,
# which would get stored in a file_wrapper and get closed by
# the default close. But that would leave w open...
def close(self):
if not self._closed:
self._closed = 1
self.del_channel()
for fd in self._fds:
os.close(fd)
self._fds = []
def __repr__ (self):
return '<select-trigger (pipe) at %x>' % id(self)
def readable (self):
return 1
def writable (self):
return 0
def handle_connect (self):
pass
def handle_close(self):
self.close()
def pull_trigger (self, thunk=None):
# print 'PULL_TRIGGER: ', len(self.thunks)
if thunk:
self.lock.acquire()
try:
self.thunks.append(thunk)
finally:
self.lock.release()
os.write(self.trigger, 'x')
def handle_read (self):
try:
self.recv(8192)
except socket.error:
return
self.lock.acquire()
try:
for thunk in self.thunks:
try:
thunk()
except:
nil, t, v, tbinfo = asyncore.compact_traceback()
print ('exception in trigger thunk:'
' (%s:%s %s)' % (t, v, tbinfo))
self.thunks = []
finally:
self.lock.release()
else:
# win32-safe version
# XXX Should define a base class that has the common methods and
# then put the platform-specific in a subclass named trigger.
HOST = '127.0.0.1'
MINPORT = 19950
NPORTS = 50
class trigger (asyncore.dispatcher):
portoffset = 0
def __init__ (self):
a = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
w = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# set TCP_NODELAY to true to avoid buffering
w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# tricky: get a pair of connected sockets
for i in range(NPORTS):
trigger.portoffset = (trigger.portoffset + 1) % NPORTS
port = MINPORT + trigger.portoffset
address = (HOST, port)
try:
a.bind(address)
except socket.error:
continue
else:
break
else:
raise RuntimeError, 'Cannot bind trigger!'
a.listen(1)
w.setblocking(0)
try:
w.connect(address)
except:
pass
r, addr = a.accept()
a.close()
w.setblocking(1)
self.trigger = w
asyncore.dispatcher.__init__(self, r)
self.lock = thread.allocate_lock()
self.thunks = []
self._trigger_connected = 0
def __repr__ (self):
return '<select-trigger (loopback) at %x>' % id(self)
def readable (self):
return 1
def writable (self):
return 0
def handle_connect (self):
pass
def pull_trigger (self, thunk=None):
if thunk:
self.lock.acquire()
try:
self.thunks.append(thunk)
finally:
self.lock.release()
self.trigger.send('x')
def handle_read (self):
try:
self.recv(8192)
except socket.error:
return
self.lock.acquire()
try:
for thunk in self.thunks:
try:
thunk()
except:
nil, t, v, tbinfo = asyncore.compact_traceback()
print ('exception in trigger thunk:'
' (%s:%s %s)' % (t, v, tbinfo))
self.thunks = []
finally:
self.lock.release()
the_trigger = None
class trigger_file:
"A 'triggered' file object"
buffer_size = 4096
def __init__ (self, parent):
global the_trigger
if the_trigger is None:
the_trigger = trigger()
self.parent = parent
self.buffer = ''
def write (self, data):
self.buffer = self.buffer + data
if len(self.buffer) > self.buffer_size:
d, self.buffer = self.buffer, ''
the_trigger.pull_trigger (
lambda d=d,p=self.parent: p.push (d)
)
def writeline (self, line):
self.write (line+'\r\n')
def writelines (self, lines):
self.write (
string.joinfields (
lines,
'\r\n'
) + '\r\n'
)
def flush (self):
if self.buffer:
d, self.buffer = self.buffer, ''
the_trigger.pull_trigger (
lambda p=self.parent,d=d: p.push (d)
)
def softspace (self, *args):
pass
def close (self):
# in a derived class, you may want to call trigger_close() instead.
self.flush()
self.parent = None
def trigger_close (self):
d, self.buffer = self.buffer, ''
p, self.parent = self.parent, None
the_trigger.pull_trigger (
lambda p=p,d=d: (p.push(d), p.close_when_done())
)
if __name__ == '__main__':
import time
def thread_function (output_file, i, n):
print 'entering thread_function'
while n:
time.sleep (5)
output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
output_file.flush()
n = n - 1
output_file.close()
print 'exiting thread_function'
class thread_parent (asynchat.async_chat):
def __init__ (self, conn, addr):
self.addr = addr
asynchat.async_chat.__init__ (self, conn)
self.set_terminator ('\r\n')
self.buffer = ''
self.count = 0
def collect_incoming_data (self, data):
self.buffer = self.buffer + data
def found_terminator (self):
data, self.buffer = self.buffer, ''
if not data:
asyncore.close_all()
print "done"
return
n = string.atoi (string.split (data)[0])
tf = trigger_file (self)
self.count = self.count + 1
thread.start_new_thread (thread_function, (tf, self.count, n))
class thread_server (asyncore.dispatcher):
def __init__ (self, family=socket.AF_INET, address=('', 9003)):
asyncore.dispatcher.__init__ (self)
self.create_socket (family, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind (address)
self.listen (5)
def handle_accept (self):
conn, addr = self.accept()
tp = thread_parent (conn, addr)
thread_server()
#asyncore.loop(1.0, use_poll=1)
try:
asyncore.loop ()
except:
asyncore.close_all()
|
|
#!/usr/bin/env python
try:
import gevent
except ImportError:
gevent = False
import pickle
import socket
import struct
import time
import random
from .formatter import GraphiteStructuredFormatter
_module_instance = None
default_graphite_pickle_port = 2004
default_graphite_plaintext_port = 2003
default_graphite_server = 'graphite'
VERSION = "0.10.0"
class GraphiteSendException(Exception):
pass
class GraphiteClient(object):
"""
Graphite Client that will setup a TCP connection to graphite.
:param prefix: string added to the start of all metrics
:type prefix: Default: "systems."
:param graphite_server: hostname or ip address of graphite server
:type graphite_server: Default: graphite
:param graphite_port: TCP port we will connect to
:type graphite_port: Default: 2003
:param debug: Toggle debug messages
:type debug: True or False
:param group: string added to after system_name and before metric name
:param system_name: FQDN of the system generating the metrics
:type system_name: Default: current FQDN
:param suffix: string added to the end of all metrics
:param lowercase_metric_names: Toggle the .lower() of all metric names
:param fqdn_squash: Change host.example.com to host_example_com
:type fqdn_squash: True or False
:param dryrun: If True, do not actually send metrics; just return the formatted message
:type dryrun: True or False
:param timeout_in_seconds: Number of seconds before a connection is timed out.
:param asynchronous: Send messages asynchronously via gevent (you have to monkey patch sockets for it to work)
:param clean_metric_name: Whether the GraphiteClient should clean the metric name
:type clean_metric_name: True or False
It will then send any metrics that you give it via
the .send() or .send_dict().
You can also take advantage of the prefix, group and system_name options
that allow you to setup default locations where your whisper files will
be kept.
eg.
( where 'linuxserver' is the name of the localhost)
.. code-block:: python
>>> init().prefix
systems.linuxserver.
>>> init(system_name='remote_host').prefix
systems.remote_host.
>>> init(group='cpu').prefix
systems.linuxserver.cpu.
>>> init(prefix='apache').prefix
apache.
"""
def __init__(self, prefix=None, graphite_server=None, graphite_port=2003,
timeout_in_seconds=2, debug=False, group=None,
system_name=None, suffix=None, lowercase_metric_names=False,
connect_on_create=True, fqdn_squash=False,
dryrun=False, asynchronous=False, autoreconnect=False,
clean_metric_name=True):
"""
setup the connection to the graphite server and work out the
prefix.
This allows for very simple syntax when sending messages to the
graphite server.
"""
# If we are not passed a host, then use the graphite server defined
# in the module.
if not graphite_server:
graphite_server = default_graphite_server
self.addr = (graphite_server, graphite_port)
# If this is a dry run, then we do not want to configure a connection
# or try and make the connection once we create the object.
self.dryrun = dryrun
if self.dryrun:
self.addr = None
graphite_server = None
connect_on_create = False
# Only connect to the graphite server and port if we are told to.
# This is mostly used for testing.
self.timeout_in_seconds = int(timeout_in_seconds)
if connect_on_create:
self.connect()
self.debug = debug
self.lastmessage = None
self.asynchronous = False
if asynchronous:
self.asynchronous = self.enable_asynchronous()
self._autoreconnect = autoreconnect
self.formatter = GraphiteStructuredFormatter(prefix=prefix, group=group,
system_name=system_name, suffix=suffix,
lowercase_metric_names=lowercase_metric_names, fqdn_squash=fqdn_squash,
clean_metric_name=clean_metric_name)
@property
def prefix(self):
'''Backward compat - access to the properties on the default formatter
deprecated - use the formatter directly for this type of muckery.
'''
return self.formatter.prefix
@property
def suffix(self):
'''Backward compat - access to properties on the default formatter
deprecated - use the formatter directly for this type of muckery.
'''
return self.formatter.suffix
@property
def lowercase_metric_names(self):
'''Backward compat - access to properties on the default formatter
deprecated - use the formatter directly for this type of muckery.
'''
return self.formatter.lowercase_metric_names
def connect(self):
"""
Make a TCP connection to the graphite server at self.addr
"""
self.socket = socket.socket()
self.socket.settimeout(self.timeout_in_seconds)
try:
self.socket.connect(self.addr)
except socket.timeout:
raise GraphiteSendException(
"Took over %d second(s) to connect to %s" %
(self.timeout_in_seconds, self.addr))
except socket.gaierror:
raise GraphiteSendException(
"No address associated with hostname %s:%s" % self.addr)
except Exception as error:
raise GraphiteSendException(
"unknown exception while connecting to %s - %s" %
(self.addr, error)
)
return self.socket
def reconnect(self):
self.disconnect()
self.connect()
def autoreconnect(self, sleep=1, attempt=3, exponential=True, jitter=5):
"""
Tries to reconnect with some delay:
exponential=False: up to `attempt` times with `sleep` seconds between
each try
exponential=True: up to `attempt` times with exponential growing `sleep`
and random delay in range 1..`jitter` (exponential backoff)
:param sleep: time to sleep between two attempts to reconnect
:type sleep: float or int
:param attempt: maximal number of attempts
:type attempt: int
:param exponential: if set - use exponential backoff logic
:type exponential: bool
:param jitter: top value of random delay, sec
:type jitter: int
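Worked example (values illustrative): with sleep=2, attempt=3, jitter=5 and
exponential=True, a failing connection is retried three times, sleeping
roughly pow(2, 1) + rand(1, 5), pow(2, 2) + rand(1, 5) and
pow(2, 3) + rand(1, 5) seconds after each failure; False is returned if all
attempts fail.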
"""
p = 0
while attempt is None or attempt > 0:
try:
self.reconnect()
return True
except GraphiteSendException:
if exponential:
p += 1
time.sleep(pow(sleep, p) + random.randint(1, jitter))
else:
time.sleep(sleep)
attempt -= 1
return False
def clean_metric_name(self, metric_name):
"""
Make sure the metric is free of control chars, spaces, tabs, etc.
"""
return self.formatter.clean_metric_name(metric_name)
def disconnect(self):
"""
Close the TCP connection with the graphite server.
"""
try:
self.socket.shutdown(1)
# If it's currently a socket, set it to None
except AttributeError:
self.socket = None
except Exception:
self.socket = None
# Set the self.socket to None, no matter what.
finally:
self.socket = None
def _dispatch_send(self, message):
"""
Dispatch the different steps of sending
"""
if self.dryrun:
return message
if not self.socket:
raise GraphiteSendException(
"Socket was not created before send"
)
sending_function = self._send
if self._autoreconnect:
sending_function = self._send_and_reconnect
try:
if self.asynchronous and gevent:
gevent.spawn(sending_function, message)
else:
sending_function(message)
except Exception as e:
self._handle_send_error(e)
return "sent {0} long message: {1}".format(len(message), message[:75])
def _handle_send_error(self, error):
if isinstance(error, socket.gaierror):
raise GraphiteSendException(
"Failed to send data to %s, with error: %s" %
(self.addr, error))
elif isinstance(error, socket.error):
raise GraphiteSendException(
"Socket closed before able to send data to %s, "
"with error: %s" %
(self.addr, error)
)
else:
raise GraphiteSendException(
"Unknown error while trying to send data down socket to %s, "
"error: %s" %
(self.addr, error)
)
def _send(self, message):
"""
Given a message send it to the graphite server.
"""
self.socket.sendall(message.encode("ascii"))
def _send_and_reconnect(self, message):
"""Send _message_ to Graphite Server and attempt reconnect on failure.
If _autoreconnect_ was specified, attempt to reconnect if first send
fails.
:raises AttributeError: When the socket has not been set.
:raises socket.error: When the socket connection is no longer valid.
"""
try:
self.socket.sendall(message.encode("ascii"))
except (AttributeError, socket.error):
if not self.autoreconnect():
raise
else:
self.socket.sendall(message.encode("ascii"))
def _presend(self, message):
"""
Complete any message alteration tasks before sending to the graphite
server.
"""
return message
def send(self, metric, value, timestamp=None, formatter=None):
"""
Format a single metric/value pair, and send it to the graphite
server.
:param metric: name of the metric
:type metric: string
:param value: value of the metric
:type value: float or int
:param timestamp: epoch time of the event
:type timestamp: float or int
:param formatter: optional non-default formatter
:type formatter: callable
.. code-block:: python
>>> g = init()
>>> g.send("metric", 54)
.. code-block:: python
>>> g = init()
>>> g.send(metric="metricname", value=73)
"""
if formatter is None:
formatter = self.formatter
message = formatter(metric, value, timestamp)
message = self._presend(message)
return self._dispatch_send(message)
def send_dict(self, data, timestamp=None, formatter=None):
"""
Format a dict of metric/values pairs, and send them all to the
graphite server.
:param data: key,value pair of metric name and metric value
:type data: dict
:param timestamp: epoch time of the event
:type timestamp: float or int
:param formatter: optional non-default formatter
:type formatter: callable
.. code-block:: python
>>> g = init()
>>> g.send_dict({'metric1': 54, 'metric2': 43, 'metricN': 999})
"""
if formatter is None:
formatter = self.formatter
metric_list = []
for metric, value in data.items():
tmp_message = formatter(metric, value, timestamp)
metric_list.append(tmp_message)
message = "".join(metric_list)
return self._dispatch_send(message)
def send_list(self, data, timestamp=None, formatter=None):
"""
Format a list of set's of (metric, value) pairs, and send them all
to the graphite server.
:param data: list of key,value pairs of metric name and metric value
:type data: list
:param timestamp: epoch time of the event
:type timestamp: float or int
:param formatter: optional non-default formatter
:type formatter: callable
.. code-block:: python
>>> g = init()
>>> g.send_list([('metric1', 54),('metric2', 43, 1384418995)])
"""
if formatter is None:
formatter = self.formatter
if timestamp is None:
timestamp = int(time.time())
else:
timestamp = int(timestamp)
metric_list = []
for metric_info in data:
# Support [ (metric, value, timestamp), ... ] as well as
# [ (metric, value), ... ].
# If the metric_info provides a timestamp then use the timestamp.
# If the metric_info fails to provide a timestamp, use the one
# provided to send_list() or generated on the fly by time.time()
if len(metric_info) == 3:
(metric, value, metric_timestamp) = metric_info
else:
(metric, value) = metric_info
metric_timestamp = timestamp
tmp_message = formatter(metric, value, metric_timestamp)
metric_list.append(tmp_message)
message = "".join(metric_list)
return self._dispatch_send(message)
def enable_asynchronous(self):
"""Check if socket have been monkey patched by gevent"""
def is_monkey_patched():
try:
from gevent import monkey, socket
except ImportError:
return False
if hasattr(monkey, "saved"):
return "socket" in monkey.saved
return gevent.socket.socket == socket.socket
if not is_monkey_patched():
raise Exception("To activate asynchonoucity, please monkey patch"
" the socket module with gevent")
return True
class GraphitePickleClient(GraphiteClient):
def __init__(self, *args, **kwargs):
# If the user has not given a graphite_port, then use the default
# pickle port.
if 'graphite_port' not in kwargs:
kwargs['graphite_port'] = default_graphite_pickle_port
# TODO: Fix this hack and use super.
# self = GraphiteClient(*args, **kwargs) # noqa
super(self.__class__, self).__init__(*args, **kwargs)
def str2listtuple(self, string_message):
"Covert a string that is ready to be sent to graphite into a tuple"
if type(string_message).__name__ not in ('str', 'unicode'):
raise TypeError("Must provide a string or unicode")
if not string_message.endswith('\n'):
string_message += "\n"
tpl_list = []
for line in string_message.split('\n'):
line = line.strip()
if not line:
continue
path, metric, timestamp = (None, None, None)
try:
(path, metric, timestamp) = line.split()
except ValueError:
raise ValueError(
"message must contain - metric_name, value and timestamp '%s'"
% line)
try:
timestamp = float(timestamp)
except ValueError:
raise ValueError("Timestamp must be float or int")
tpl_list.append((path, (timestamp, metric)))
if len(tpl_list) == 0:
raise GraphiteSendException("No messages to send")
payload = pickle.dumps(tpl_list)
header = struct.pack("!L", len(payload))
message = header + payload
return message
def _send(self, message):
""" Given a message send it to the graphite server. """
# An option to lowercase the entire message
if self.lowercase_metric_names:
message = message.lower()
# convert the message into a pickled payload.
message = self.str2listtuple(message)
try:
self.socket.sendall(message)
# Capture missing socket.
except socket.gaierror as error:
raise GraphiteSendException(
"Failed to send data to %s, with error: %s" %
(self.addr, error)) # noqa
# Capture socket closure before send.
except socket.error as error:
raise GraphiteSendException(
"Socket closed before able to send data to %s, "
"with error: %s" %
(self.addr, error)) # noqa
except Exception as error:
raise GraphiteSendException(
"Unknown error while trying to send data down socket to %s, "
"error: %s" %
(self.addr, error)) # noqa
return "sent %d long pickled message" % len(message)
def init(init_type='plaintext_tcp', *args, **kwargs):
"""
Create the module instance of the GraphiteClient.
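A minimal usage sketch (prefix and metric name are illustrative; dryrun=True
avoids opening a real connection):
.. code-block:: python
>>> g = init(init_type='plaintext_tcp', prefix='systems', dryrun=True)
>>> g.send('cpu.load', 1.5)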
"""
global _module_instance
reset()
validate_init_types = ['plaintext_tcp', 'plaintext', 'pickle_tcp',
'pickle', 'plain']
if init_type not in validate_init_types:
raise GraphiteSendException(
"Invalid init_type '%s', must be one of: %s" %
(init_type, ", ".join(validate_init_types)))
# Use TCP to send data to the plain text receiver on the graphite server.
if init_type in ['plaintext_tcp', 'plaintext', 'plain']:
_module_instance = GraphiteClient(*args, **kwargs)
# Use TCP to send pickled data to the pickle receiver on the graphite
# server.
if init_type in ['pickle_tcp', 'pickle']:
_module_instance = GraphitePickleClient(*args, **kwargs)
return _module_instance
def send(*args, **kwargs):
""" Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method.
"""
if not _module_instance:
raise GraphiteSendException(
"Must call graphitesend.init() before sending")
_module_instance.send(*args, **kwargs)
return _module_instance
def send_dict(*args, **kwargs):
""" Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method.
"""
if not _module_instance:
raise GraphiteSendException(
"Must call graphitesend.init() before sending")
_module_instance.send_dict(*args, **kwargs)
return _module_instance
def send_list(*args, **kwargs):
""" Make sure that we have an instance of the GraphiteClient.
Then send the metrics to the graphite server.
User consumable method.
"""
if not _module_instance:
raise GraphiteSendException(
"Must call graphitesend.init() before sending")
_module_instance.send_list(*args, **kwargs)
return _module_instance
def reset():
""" disconnect from the graphite server and destroy the module instance.
"""
global _module_instance
if not _module_instance:
return False
_module_instance.disconnect()
_module_instance = None
def cli():
""" Allow the module to be called from the cli. """
import argparse
parser = argparse.ArgumentParser(description='Send data to graphite')
# Core of the application is to accept a metric and a value.
parser.add_argument('metric', metavar='metric', type=str,
help='name.of.metric')
parser.add_argument('value', metavar='value', type=int,
help='value of metric as int')
args = parser.parse_args()
metric = args.metric
value = args.value
graphitesend_instance = init()
graphitesend_instance.send(metric, value)
if __name__ == '__main__': # pragma: no cover
cli()
|
|
import os
import tempfile
import unittest
from tempfile import TemporaryDirectory
from mkdocs import exceptions
from mkdocs.config import base, defaults
from mkdocs.config.config_options import BaseConfigOption
class ConfigBaseTests(unittest.TestCase):
def test_unrecognised_keys(self):
c = base.Config(schema=defaults.get_schema())
c.load_dict({
'not_a_valid_config_option': "test"
})
failed, warnings = c.validate()
self.assertEqual(warnings, [
('not_a_valid_config_option',
'Unrecognised configuration name: not_a_valid_config_option')
])
def test_missing_required(self):
c = base.Config(schema=defaults.get_schema())
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'site_name')
self.assertEqual(str(errors[0][1]), 'Required configuration not provided.')
self.assertEqual(len(warnings), 0)
def test_load_from_file(self):
"""
Users can explicitly set the config file using the '--config' option.
Allows users to specify a config other than the default `mkdocs.yml`.
"""
temp_dir = TemporaryDirectory()
config_file = open(os.path.join(temp_dir.name, 'mkdocs.yml'), 'w')
os.mkdir(os.path.join(temp_dir.name, 'docs'))
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
config_file.close()
cfg = base.load_config(config_file=config_file.name)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
finally:
temp_dir.cleanup()
def test_load_default_file(self):
"""
test that `mkdocs.yml` will be loaded when '--config' is not set.
"""
temp_dir = TemporaryDirectory()
config_file = open(os.path.join(temp_dir.name, 'mkdocs.yml'), 'w')
os.mkdir(os.path.join(temp_dir.name, 'docs'))
old_dir = os.getcwd()
try:
os.chdir(temp_dir.name)
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
config_file.close()
cfg = base.load_config(config_file=None)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
finally:
os.chdir(old_dir)
temp_dir.cleanup()
def test_load_default_file_with_yaml(self):
"""
test that `mkdocs.yaml` will be loaded when '--config' is not set.
"""
temp_dir = TemporaryDirectory()
config_file = open(os.path.join(temp_dir.name, 'mkdocs.yaml'), 'w')
os.mkdir(os.path.join(temp_dir.name, 'docs'))
old_dir = os.getcwd()
try:
os.chdir(temp_dir.name)
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
config_file.close()
cfg = base.load_config(config_file=None)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
finally:
os.chdir(old_dir)
temp_dir.cleanup()
def test_load_default_file_prefer_yml(self):
"""
test that `mkdocs.yml` is preferred over `mkdocs.yaml` when '--config' is not set and both exist.
"""
temp_dir = TemporaryDirectory()
config_file1 = open(os.path.join(temp_dir.name, 'mkdocs.yml'), 'w')
config_file2 = open(os.path.join(temp_dir.name, 'mkdocs.yaml'), 'w')
os.mkdir(os.path.join(temp_dir.name, 'docs'))
old_dir = os.getcwd()
try:
os.chdir(temp_dir.name)
config_file1.write("site_name: MkDocs Test1\n")
config_file1.flush()
config_file1.close()
config_file2.write("site_name: MkDocs Test2\n")
config_file2.flush()
config_file2.close()
cfg = base.load_config(config_file=None)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test1')
finally:
os.chdir(old_dir)
temp_dir.cleanup()
def test_load_from_missing_file(self):
with self.assertRaises(exceptions.ConfigurationError):
base.load_config(config_file='missing_file.yml')
def test_load_from_open_file(self):
"""
`load_config` can accept an open file descriptor.
"""
temp_dir = TemporaryDirectory()
temp_path = temp_dir.name
config_fname = os.path.join(temp_path, 'mkdocs.yml')
config_file = open(config_fname, 'w+')
os.mkdir(os.path.join(temp_path, 'docs'))
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
cfg = base.load_config(config_file=config_file)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
# load_config will always close the file
self.assertTrue(config_file.closed)
finally:
temp_dir.cleanup()
def test_load_from_closed_file(self):
"""
The `serve` command with auto-reload may pass in a closed file descriptor.
Ensure `load_config` reloads the closed file.
"""
temp_dir = TemporaryDirectory()
config_file = open(os.path.join(temp_dir.name, 'mkdocs.yml'), 'w')
os.mkdir(os.path.join(temp_dir.name, 'docs'))
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
config_file.close()
cfg = base.load_config(config_file=config_file)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
finally:
temp_dir.cleanup()
def test_load_from_deleted_file(self):
"""
Deleting the config file could trigger a server reload.
"""
config_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
config_file.write("site_name: MkDocs Test\n")
config_file.flush()
config_file.close()
finally:
os.remove(config_file.name)
with self.assertRaises(exceptions.ConfigurationError):
base.load_config(config_file=config_file)
def test_load_missing_required(self):
"""
`site_name` is a required setting.
"""
config_file = tempfile.NamedTemporaryFile('w', delete=False)
try:
config_file.write(
"site_dir: output\nsite_uri: https://www.mkdocs.org\n")
config_file.flush()
config_file.close()
with self.assertRaises(exceptions.Abort):
base.load_config(config_file=config_file.name)
finally:
os.remove(config_file.name)
def test_pre_validation_error(self):
class InvalidConfigOption(BaseConfigOption):
def pre_validation(self, config, key_name):
raise base.ValidationError('pre_validation error')
c = base.Config(schema=(('invalid_option', InvalidConfigOption()), ))
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'invalid_option')
self.assertEqual(str(errors[0][1]), 'pre_validation error')
self.assertTrue(isinstance(errors[0][1], base.ValidationError))
self.assertEqual(len(warnings), 0)
def test_run_validation_error(self):
class InvalidConfigOption(BaseConfigOption):
def run_validation(self, value):
raise base.ValidationError('run_validation error')
c = base.Config(schema=(('invalid_option', InvalidConfigOption()), ))
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'invalid_option')
self.assertEqual(str(errors[0][1]), 'run_validation error')
self.assertTrue(isinstance(errors[0][1], base.ValidationError))
self.assertEqual(len(warnings), 0)
def test_post_validation_error(self):
class InvalidConfigOption(BaseConfigOption):
def post_validation(self, config, key_name):
raise base.ValidationError('post_validation error')
c = base.Config(schema=(('invalid_option', InvalidConfigOption()), ))
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'invalid_option')
self.assertEqual(str(errors[0][1]), 'post_validation error')
self.assertTrue(isinstance(errors[0][1], base.ValidationError))
self.assertEqual(len(warnings), 0)
def test_pre_and_run_validation_errors(self):
""" A pre_validation error does not stop run_validation from running. """
class InvalidConfigOption(BaseConfigOption):
def pre_validation(self, config, key_name):
raise base.ValidationError('pre_validation error')
def run_validation(self, value):
raise base.ValidationError('run_validation error')
c = base.Config(schema=(('invalid_option', InvalidConfigOption()), ))
errors, warnings = c.validate()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0][0], 'invalid_option')
self.assertEqual(str(errors[0][1]), 'pre_validation error')
self.assertTrue(isinstance(errors[0][1], base.ValidationError))
self.assertEqual(errors[1][0], 'invalid_option')
self.assertEqual(str(errors[1][1]), 'run_validation error')
self.assertTrue(isinstance(errors[1][1], base.ValidationError))
self.assertEqual(len(warnings), 0)
def test_run_and_post_validation_errors(self):
""" A run_validation error stops post_validation from running. """
class InvalidConfigOption(BaseConfigOption):
def run_validation(self, value):
raise base.ValidationError('run_validation error')
def post_validation(self, config, key_name):
raise base.ValidationError('post_validation error')
c = base.Config(schema=(('invalid_option', InvalidConfigOption()), ))
errors, warnings = c.validate()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0][0], 'invalid_option')
self.assertEqual(str(errors[0][1]), 'run_validation error')
self.assertTrue(isinstance(errors[0][1], base.ValidationError))
self.assertEqual(len(warnings), 0)
def test_validation_warnings(self):
class InvalidConfigOption(BaseConfigOption):
def pre_validation(self, config, key_name):
self.warnings.append('pre_validation warning')
def run_validation(self, value):
self.warnings.append('run_validation warning')
def post_validation(self, config, key_name):
self.warnings.append('post_validation warning')
c = base.Config(schema=(('invalid_option', InvalidConfigOption()), ))
errors, warnings = c.validate()
self.assertEqual(len(errors), 0)
self.assertEqual(warnings, [
('invalid_option', 'pre_validation warning'),
('invalid_option', 'run_validation warning'),
('invalid_option', 'post_validation warning'),
])
def test_load_from_file_with_relative_paths(self):
"""
When explicitly setting a config file, paths should be relative to the
config file, not the working directory.
"""
config_dir = TemporaryDirectory()
config_fname = os.path.join(config_dir.name, 'mkdocs.yml')
docs_dir = os.path.join(config_dir.name, 'src')
os.mkdir(docs_dir)
config_file = open(config_fname, 'w')
try:
config_file.write("docs_dir: src\nsite_name: MkDocs Test\n")
config_file.flush()
config_file.close()
cfg = base.load_config(config_file=config_file)
self.assertTrue(isinstance(cfg, base.Config))
self.assertEqual(cfg['site_name'], 'MkDocs Test')
self.assertEqual(cfg['docs_dir'], docs_dir)
self.assertEqual(cfg.config_file_path, config_fname)
self.assertIsInstance(cfg.config_file_path, str)
finally:
config_dir.cleanup()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for linker-specific test cases.
The custom dynamic linker can only be tested through a custom test case
for various technical reasons:
- It's an 'invisible feature', i.e. it doesn't expose a new API or
behaviour, all it does is save RAM when loading native libraries.
- Checking that it works correctly requires several things that do not
fit the existing GTest-based and instrumentation-based tests:
- Native test code needs to be run in both the browser and renderer
process at the same time just after loading native libraries, in
a completely asynchronous way.
- Each test case requires restarting a whole new application process
with a different command-line.
- Enabling test support in the Linker code requires building a special
APK with a flag to activate special test-only support code in the
Linker code itself.
Host-driven tests have also been tried, but since they're really
sub-classes of instrumentation tests, they didn't work well either.
To build and run the linker tests, do the following:
ninja -C out/Debug chromium_linker_test_apk
build/android/test_runner.py linker
"""
# pylint: disable=R0201
import logging
import re
from devil.android import device_errors
from devil.android.sdk import intent
from pylib.base import base_test_result
ResultType = base_test_result.ResultType
_PACKAGE_NAME = 'org.chromium.chromium_linker_test_apk'
_ACTIVITY_NAME = '.ChromiumLinkerTestActivity'
_COMMAND_LINE_FILE = '/data/local/tmp/chromium-linker-test-command-line'
# Logcat filters used during each test. Only the 'chromium' one is really
# needed, but the logs are added to the TestResult in case of error, and
# it is handy to have others as well when troubleshooting.
_LOGCAT_FILTERS = ['*:s', 'chromium:v', 'cr.chromium:v',
'cr.chromium_android_linker:v', 'cr.library_loader:v',
'cr.linker_test:v']
#_LOGCAT_FILTERS = ['*:v'] ## DEBUG
# Regular expression used to match status lines in logcat.
_RE_BROWSER_STATUS_LINE = re.compile(r' BROWSER_LINKER_TEST: (FAIL|SUCCESS)$')
_RE_RENDERER_STATUS_LINE = re.compile(r' RENDERER_LINKER_TEST: (FAIL|SUCCESS)$')
def _StartActivityAndWaitForLinkerTestStatus(device, timeout):
"""Force-start an activity and wait up to |timeout| seconds until the full
linker test status lines appear in the logcat, recorded through |device|.
Args:
device: A DeviceUtils instance.
timeout: Timeout in seconds
Returns:
A (status, logs) tuple, where status is a ResultType constant, and logs
is the final logcat output as a string.
"""
# 1. Start recording logcat with appropriate filters.
with device.GetLogcatMonitor(filter_specs=_LOGCAT_FILTERS) as logmon:
# 2. Force-start activity.
device.StartActivity(
intent.Intent(package=_PACKAGE_NAME, activity=_ACTIVITY_NAME),
force_stop=True)
# 3. Wait up to |timeout| seconds until the test status is in the logcat.
result = ResultType.PASS
try:
browser_match = logmon.WaitFor(_RE_BROWSER_STATUS_LINE, timeout=timeout)
logging.debug('Found browser match: %s', browser_match.group(0))
renderer_match = logmon.WaitFor(_RE_RENDERER_STATUS_LINE,
timeout=timeout)
logging.debug('Found renderer match: %s', renderer_match.group(0))
if (browser_match.group(1) != 'SUCCESS'
or renderer_match.group(1) != 'SUCCESS'):
result = ResultType.FAIL
except device_errors.CommandTimeoutError:
result = ResultType.TIMEOUT
return result, '\n'.join(device.adb.Logcat(dump=True))
class LibraryLoadMap(dict):
"""A helper class to pretty-print a map of library names to load addresses."""
def __str__(self):
items = ['\'%s\': 0x%x' % (name, address) for \
(name, address) in self.iteritems()]
return '{%s}' % (', '.join(items))
def __repr__(self):
return 'LibraryLoadMap(%s)' % self.__str__()
class AddressList(list):
"""A helper class to pretty-print a list of load addresses."""
def __str__(self):
items = ['0x%x' % address for address in self]
return '[%s]' % (', '.join(items))
def __repr__(self):
return 'AddressList(%s)' % self.__str__()
class LinkerTestCaseBase(object):
"""Base class for linker test cases."""
def __init__(self, is_modern_linker=False, is_low_memory=False):
"""Create a test case.
Args:
is_modern_linker: True to test ModernLinker, False to test LegacyLinker.
is_low_memory: True to simulate a low-memory device, False otherwise.
"""
self.is_modern_linker = is_modern_linker
if is_modern_linker:
test_suffix = 'ForModernLinker'
else:
test_suffix = 'ForLegacyLinker'
self.is_low_memory = is_low_memory
if is_low_memory:
test_suffix += 'LowMemoryDevice'
else:
test_suffix += 'RegularDevice'
class_name = self.__class__.__name__
self.qualified_name = '%s.%s' % (class_name, test_suffix)
self.tagged_name = self.qualified_name
def _RunTest(self, _device):
"""Run the test, must be overriden.
Args:
_device: A DeviceUtils interface.
Returns:
A (status, log) tuple, where <status> is a ResultType constant, and <log>
is the logcat output captured during the test in case of error, or None
in case of success.
"""
return ResultType.FAIL, 'Unimplemented _RunTest() method!'
def Run(self, device):
"""Run the test on a given device.
Args:
device: A DeviceUtils instance for the target device.
Returns:
A base_test_result.TestRunResults() instance.
"""
margin = 8
print '[ %-*s ] %s' % (margin, 'RUN', self.tagged_name)
logging.info('Running linker test: %s', self.tagged_name)
# Create command-line file on device.
if self.is_modern_linker:
command_line_flags = '--use-linker=modern'
else:
command_line_flags = '--use-linker=legacy'
if self.is_low_memory:
command_line_flags += ' --low-memory-device'
device.WriteFile(_COMMAND_LINE_FILE, command_line_flags)
# Run the test.
status, logs = self._RunTest(device)
result_text = 'OK'
if status == ResultType.FAIL:
result_text = 'FAILED'
elif status == ResultType.TIMEOUT:
result_text = 'TIMEOUT'
print '[ %*s ] %s' % (margin, result_text, self.tagged_name)
results = base_test_result.TestRunResults()
results.AddResult(
base_test_result.BaseTestResult(
self.tagged_name,
status,
log=logs))
return results
def __str__(self):
return self.tagged_name
def __repr__(self):
return self.tagged_name
class LinkerSharedRelroTest(LinkerTestCaseBase):
"""A linker test case to check the status of shared RELRO sections.
The core of the checks performed here are pretty simple:
- Clear the logcat and start recording with an appropriate set of filters.
- Create the command-line appropriate for the test-case.
- Start the activity (always forcing a cold start).
- Every second, look at the current content of the filtered logcat lines
and look for instances of the following:
BROWSER_LINKER_TEST: <status>
RENDERER_LINKER_TEST: <status>
where <status> can be either FAIL or SUCCESS. These lines can appear
in any order in the logcat. Once both browser and renderer status are
found, stop the loop. Otherwise timeout after 30 seconds.
Note that there can be other lines beginning with BROWSER_LINKER_TEST:
and RENDERER_LINKER_TEST:, but they are not followed by a <status> code.
- The test case passes if the <status> for both the browser and renderer
processes is SUCCESS. Otherwise it is a failure.
"""
def _RunTest(self, device):
# Wait up to 30 seconds until the linker test status is in the logcat.
return _StartActivityAndWaitForLinkerTestStatus(device, timeout=30)
|
|
import collections
import os.path
import sys
import tempfile
import textwrap
import unittest
from contextlib import ExitStack
from unittest import mock
import pyperf
from pyperf import tests
from pyperf._utils import create_pipe, MS_WINDOWS, shell_quote
def check_args(loops, a, b):
if a != 1:
raise ValueError
if b != 2:
raise ValueError
# number of loops => number of seconds
return loops
Result = collections.namedtuple('Result', 'runner bench stdout')
class TestRunner(unittest.TestCase):
def create_runner(self, args, **kwargs):
# hack to be able to create multiple instances per process
pyperf.Runner._created.clear()
runner = pyperf.Runner(**kwargs)
# disable CPU affinity to not pollute stdout
runner._cpu_affinity = lambda: None
runner.parse_args(args)
return runner
def exec_runner(self, *args, **kwargs):
def fake_timer():
t = fake_timer.value
fake_timer.value += 1.0
return t
fake_timer.value = 0.0
def fake_get_clock_info(clock):
class ClockInfo:
implementation = 'fake_clock'
resolution = 1.0
return ClockInfo()
name = kwargs.pop('name', 'bench')
time_func = kwargs.pop('time_func', None)
runner = self.create_runner(args, **kwargs)
with mock.patch('time.perf_counter', fake_timer):
with mock.patch('time.get_clock_info', fake_get_clock_info):
with tests.capture_stdout() as stdout:
with tests.capture_stderr() as stderr:
if time_func:
bench = runner.bench_time_func(name, time_func)
else:
bench = runner.bench_func(name, check_args, None, 1, 2)
stdout = stdout.getvalue()
stderr = stderr.getvalue()
if '--stdout' not in args:
self.assertEqual(stderr, '')
# check bench_time_func() bench
self.assertIsInstance(bench, pyperf.Benchmark)
self.assertEqual(bench.get_name(), name)
self.assertEqual(bench.get_nrun(), 1)
return Result(runner, bench, stdout)
def test_worker(self):
result = self.exec_runner('--worker', '-l1', '-w1')
self.assertRegex(result.stdout,
r'^bench: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$')
def test_debug_single_value(self):
result = self.exec_runner('--debug-single-value', '--worker')
self.assertEqual(result.bench.get_nvalue(), 1)
def test_pipe(self):
rpipe, wpipe = create_pipe()
with rpipe:
with wpipe:
arg = wpipe.to_subprocess()
# Don't close the file descriptor, it is closed by
# the Runner class
wpipe._fd = None
result = self.exec_runner('--pipe', str(arg),
'--worker', '-l1', '-w1')
with rpipe.open_text() as rfile:
bench_json = rfile.read()
self.assertEqual(bench_json,
tests.benchmark_as_json(result.bench))
def test_json_exists(self):
with tempfile.NamedTemporaryFile('wb+') as tmp:
with tests.capture_stdout() as stdout:
try:
self.create_runner(['--worker', '-l1', '-w1',
'--output', tmp.name])
except SystemExit as exc:
self.assertEqual(exc.code, 1)
self.assertEqual('ERROR: The JSON file %r already exists'
% tmp.name,
stdout.getvalue().rstrip())
def test_verbose_metadata(self):
result = self.exec_runner('--worker', '-l1', '-w1',
'--verbose', '--metadata')
self.assertRegex(result.stdout,
r'^'
r'(?:Warmup [0-9]+: 1\.00 sec \(loops: [0-9]+, raw: 1\.00 sec\)\n)+'
r'\n'
r'(?:Value [0-9]+: 1\.00 sec\n)+'
r'\n'
r'Metadata:\n'
r'(?:- .*\n)+'
r'\n'
r'bench: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$')
def test_loops_calibration(self):
def time_func(loops):
# number of iterations => number of microseconds
return loops * 1e-6
result = self.exec_runner('--worker', '--calibrate-loops',
'-v', time_func=time_func)
for run in result.bench.get_runs():
self.assertEqual(run.get_total_loops(), 2 ** 17)
expected = textwrap.dedent('''
Warmup 1: 1.00 us (loops: 1, raw: 1.00 us)
Warmup 2: 1.00 us (loops: 2, raw: 2.00 us)
Warmup 3: 1.00 us (loops: 4, raw: 4.00 us)
Warmup 4: 1.00 us (loops: 8, raw: 8.00 us)
Warmup 5: 1.00 us (loops: 16, raw: 16.0 us)
Warmup 6: 1.00 us (loops: 32, raw: 32.0 us)
Warmup 7: 1.00 us (loops: 64, raw: 64.0 us)
Warmup 8: 1.00 us (loops: 128, raw: 128 us)
Warmup 9: 1.00 us (loops: 256, raw: 256 us)
Warmup 10: 1.00 us (loops: 512, raw: 512 us)
Warmup 11: 1.00 us (loops: 1024, raw: 1.02 ms)
Warmup 12: 1.00 us (loops: 2048, raw: 2.05 ms)
Warmup 13: 1.00 us (loops: 4096, raw: 4.10 ms)
Warmup 14: 1.00 us (loops: 8192, raw: 8.19 ms)
Warmup 15: 1.00 us (loops: 2^14, raw: 16.4 ms)
Warmup 16: 1.00 us (loops: 2^15, raw: 32.8 ms)
Warmup 17: 1.00 us (loops: 2^16, raw: 65.5 ms)
Warmup 18: 1.00 us (loops: 2^17, raw: 131 ms)
''').strip()
self.assertIn(expected, result.stdout)
def test_loops_calibration_min_time(self):
def time_func(loops):
# number of iterations => number of microseconds
return loops * 1e-6
result = self.exec_runner('--worker', '--calibrate-loops',
'--min-time', '0.001',
time_func=time_func)
for run in result.bench.get_runs():
self.assertEqual(run.get_total_loops(), 2 ** 10)
def test_json_file(self):
with tests.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.json')
result = self.exec_runner('--worker', '-l1', '-w1',
'--output', filename)
loaded = pyperf.Benchmark.load(filename)
tests.compare_benchmarks(self, loaded, result.bench)
def test_time_func_zero(self):
runner = self.create_runner(['--worker', '-l1', '-w1'])
def time_func(loops):
return 0
with self.assertRaises(ValueError) as cm:
runner.bench_time_func('bench', time_func)
self.assertEqual(str(cm.exception),
'benchmark function returned zero')
def test_calibration_zero(self):
runner = self.create_runner(['--worker', '--calibrate-loops'])
def time_func(loops):
return 0
with self.assertRaises(SystemExit):
with tests.capture_stdout() as stdout:
runner.bench_time_func('bench', time_func)
self.assertIn('ERROR: failed to calibrate the number of loops',
stdout.getvalue())
def check_calibrate_loops(self, runner, time_func, warmups):
with tests.capture_stdout():
bench = runner.bench_time_func('bench', time_func)
runs = bench.get_runs()
self.assertEqual(len(runs), 1)
run = runs[0]
self.assertEqual(run.warmups, warmups)
def test_calibrate_loops(self):
args = ['--worker', '-w0', '-n2', '--min-time=1.0',
'--calibrate-loops']
runner = self.create_runner(args)
def time_func(loops):
if loops < 8:
return 0.5
else:
return 1.0
time_func.step = 0
warmups = (
(1, 0.5),
(2, 0.5 / 2),
(4, 0.5 / 4),
# warmup 1: dt >= min_time
(8, 1.0 / 8),
# warmup 2
(8, 1.0 / 8))
self.check_calibrate_loops(runner, time_func, warmups)
def test_calibrate_loops_jit(self):
args = ['--worker', '-w0', '-n2', '--min-time=1.0',
'--calibrate-loops']
runner = self.create_runner(args)
# Simulate PyPy JIT: running the same function becomes faster
# after 2 values while running warmup values
def time_func(loops):
if loops < 16:
return 0
time_func.step += 1
if time_func.step == 1:
return 3.0
elif time_func.step == 2:
return 0.5
else:
return 1.0
time_func.step = 0
warmups = (
# first calibration values are zero
(1, 0.0),
(2, 0.0),
(4, 0.0),
(8, 0.0),
# warmup 1: first non-zero calibration value
(16, 3.0 / 16),
# warmup 2: JIT triggered, dt < min_time,
# double number of loops
(16, 0.5 / 16),
# warmup 3
(32, 1.0 / 32))
self.check_calibrate_loops(runner, time_func, warmups)
def test_recalibrate_loops_jit(self):
args = ['--worker', '-w0', '-n2', '--min-time=1.0',
'--recalibrate-loops', '--loops=16']
runner = self.create_runner(args)
# Simulate PyPy JIT: running the same function becomes faster
# after 2 values while running warmup values
def time_func(loops):
time_func.step += 1
if time_func.step == 1:
return 1.0
elif time_func.step == 2:
return 0.5
else:
return 1.0
time_func.step = 0
warmups = (
# warmup 1
(16, 1.0 / 16),
# warmup 2: JIT optimized code, dt < min_time
# double the number of loops
(16, 0.5 / 16),
# warmup 3, new try with loops x 2
(32, 1.0 / 32))
self.check_calibrate_loops(runner, time_func, warmups)
def test_loops_power(self):
# test 'x^y' syntax for loops
runner = self.create_runner(['--loops', '2^8'])
self.assertEqual(runner.args.loops, 256)
def check_two_benchmarks(self, task=None):
args = ['--worker', '--loops=1', '-w0', '-n3']
if task is not None:
args.append('--worker-task=%s' % task)
runner = self.create_runner(args)
def time_func(loops):
return 1.0
def time_func2(loops):
return 2.0
with tests.capture_stdout():
bench1 = runner.bench_time_func('bench1', time_func)
bench2 = runner.bench_time_func('bench2', time_func2)
return (bench1, bench2)
def test_two_benchmarks(self):
bench1, bench2 = self.check_two_benchmarks()
self.assertEqual(bench1.get_name(), 'bench1')
self.assertEqual(bench1.get_values(), (1.0, 1.0, 1.0))
self.assertEqual(bench2.get_name(), 'bench2')
self.assertEqual(bench2.get_values(), (2.0, 2.0, 2.0))
def test_worker_task(self):
bench1, bench2 = self.check_two_benchmarks(task=0)
self.assertEqual(bench1.get_name(), 'bench1')
self.assertEqual(bench1.get_values(), (1.0, 1.0, 1.0))
self.assertIs(bench2, None)
bench1, bench2 = self.check_two_benchmarks(task=1)
self.assertIs(bench1, None)
self.assertEqual(bench2.get_name(), 'bench2')
self.assertEqual(bench2.get_values(), (2.0, 2.0, 2.0))
bench1, bench2 = self.check_two_benchmarks(task=2)
self.assertIs(bench1, None)
self.assertIs(bench2, None)
def test_show_name(self):
result = self.exec_runner('--worker', '-l1', '-w1', name='NAME')
self.assertRegex(result.stdout,
r'^NAME: Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$')
result = self.exec_runner('--worker', '-l1', '-w1', name='NAME', show_name=False)
self.assertRegex(result.stdout,
r'^Mean \+- std dev: 1\.00 sec \+- 0\.00 sec\n$')
def test_compare_to(self):
def time_func(loops):
return 1.0
def abs_executable(python):
return python
run = pyperf.Run([1.5],
metadata={'name': 'name'},
collect_metadata=False)
bench = pyperf.Benchmark([run])
suite = pyperf.BenchmarkSuite([bench])
with ExitStack() as cm:
def popen(*args, **kw):
mock_popen = mock.Mock()
mock_popen.wait.return_value = 0
return mock_popen
mock_subprocess = cm.enter_context(mock.patch('pyperf._master.subprocess'))
mock_subprocess.Popen.side_effect = popen
cm.enter_context(mock.patch('pyperf._runner.abs_executable',
side_effect=abs_executable))
cm.enter_context(mock.patch('pyperf._master._load_suite_from_pipe',
return_value=suite))
args = ["--python=python3.8", "--compare-to=python3.6", "--min-time=5",
"-p1", "-w3", "-n7", "-l11"]
runner = self.create_runner(args)
with tests.capture_stdout():
runner.bench_time_func('name', time_func)
def popen_call(python):
args = [python, mock.ANY, '--worker',
'--pipe', mock.ANY, '--worker-task=0',
'--values', '7', '--min-time', '5.0',
'--loops', '11', '--warmups', '3']
kw = {}
if MS_WINDOWS:
kw['close_fds'] = False
else:
kw['pass_fds'] = mock.ANY
return mock.call(args, env=mock.ANY, **kw)
call1 = popen_call('python3.6')
call2 = popen_call('python3.8')
mock_subprocess.Popen.assert_has_calls([call1, call2])
def test_parse_args_twice_error(self):
args = ["--worker", '-l1', '-w1']
runner = self.create_runner(args)
with self.assertRaises(RuntimeError):
runner.parse_args(args)
def test_duplicated_named(self):
def time_func(loops):
return 1.0
runner = self.create_runner('-l1 -w0 -n1 --worker'.split())
with tests.capture_stdout():
runner.bench_time_func('optim', time_func)
with self.assertRaises(ValueError) as cm:
runner.bench_time_func('optim', time_func)
self.assertEqual(str(cm.exception),
"duplicated benchmark name: 'optim'")
def test_bench_command(self):
args = [sys.executable, '-c', 'pass']
runner = self.create_runner('-l1 -w0 -n1 --worker'.split())
with tests.capture_stdout():
bench = runner.bench_command('bench', args)
self.assertEqual(bench.get_metadata()['command'],
' '.join(map(shell_quote, args)))
def test_single_instance(self):
runner1 = self.create_runner([]) # noqa
with self.assertRaises(RuntimeError):
runner2 = pyperf.Runner() # noqa
class TestRunnerCPUAffinity(unittest.TestCase):
def create_runner(self, args, **kwargs):
# hack to be able to create multiple instances per process
pyperf.Runner._created.clear()
runner = pyperf.Runner(**kwargs)
runner.parse_args(args)
return runner
def test_cpu_affinity_args(self):
runner = self.create_runner(['-v', '--affinity=3,7'])
with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity:
with tests.capture_stdout() as stdout:
runner._cpu_affinity()
self.assertEqual(runner.args.affinity, '3,7')
self.assertEqual(stdout.getvalue(),
'Pin process to CPUs: 3,7\n')
mock_setaffinity.assert_called_once_with([3, 7])
def test_cpu_affinity_isolcpus(self):
runner = self.create_runner(['-v'])
with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity:
with mock.patch('pyperf._runner.get_isolated_cpus', return_value=[1, 2]):
with tests.capture_stdout() as stdout:
runner._cpu_affinity()
self.assertEqual(runner.args.affinity, '1-2')
self.assertEqual(stdout.getvalue(),
'Pin process to isolated CPUs: 1-2\n')
mock_setaffinity.assert_called_once_with([1, 2])
def test_cpu_affinity_no_isolcpus(self):
runner = self.create_runner(['-v'])
with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity:
with mock.patch('pyperf._runner.get_isolated_cpus', return_value=None):
runner._cpu_affinity()
self.assertFalse(runner.args.affinity)
self.assertEqual(mock_setaffinity.call_count, 0)
if __name__ == "__main__":
unittest.main()
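# For reference, a minimal standalone pyperf script resembling what these tests drive
# through exec_runner() (kept as a comment so importing this module stays side-effect
# free; pyperf.Runner allows only one instance per process):
#   import time
#   import pyperf
#   runner = pyperf.Runner()
#   runner.bench_func('sleep_1ms', time.sleep, 1e-3)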
|
|
#
# CORE
# Copyright (c)2011-2015 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# Created on Dec 18, 2014
#
# @author: santiago
#
import os
import pwd
import collections
from core.netns import nodes
from core.api import coreapi
from core.misc.ipaddr import *
from xml.dom.minidom import Document
from xmlutils import *
from xmldeployment import CoreDeploymentWriter
def enum(**enums):
return type('Enum', (), enums)
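# Example: enum(RED='red', BLUE='blue') returns a class whose attributes are the given
# constants, so enum(RED='red').RED == 'red'; the Attrib groups below are built this way.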
class Attrib(object):
''' scenario plan attribute constants
'''
NetType = enum(WIRELESS = 'wireless', ETHERNET = 'ethernet',
PTP_WIRED = 'point-to-point-wired',
PTP_WIRELESS = 'point-to-point-wireless')
MembType = enum(INTERFACE = 'interface', CHANNEL = 'channel',
SWITCH = 'switch', HUB = 'hub', TUNNEL = 'tunnel',
NETWORK = "network")
DevType = enum(HOST = 'host', ROUTER = 'router', SWITCH = 'switch',
HUB = 'hub')
NodeType = enum(ROUTER = 'router', HOST = 'host', MDR = 'mdr',
PC = 'PC', RJ45 = 'rj45')
Alias = enum(ID = "COREID")
''' A link endpoint in CORE
net: the network that the endpoint belongs to
netif: the network interface at this end
id: the identifier for the endpoint
l2devport: if the other end is a layer 2 device, this is the assigned port in that device
params: link/interface parameters
'''
Endpoint = collections.namedtuple('Endpoint',
['net', 'netif', 'type', 'id', 'l2devport', 'params'])
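# Illustrative only (hypothetical objects and names): an endpoint for interface eth0 of
# node "n1", reached through port e2 of switch "lan4", would look like
#   Endpoint(net=switch_net, netif=eth0, type=Attrib.MembType.INTERFACE,
#            id="n1/eth0", l2devport="lan4/e2", params=eth0.getparams())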
class CoreDocumentWriter1(Document):
''' Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init
method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.
'''
def __init__(self, session):
''' Create an empty Scenario XML Document, then populate it with
objects from the given session.
'''
Document.__init__(self)
session.info('Exporting to NMF XML version 1.0')
with session._objslock:
self.scenarioPlan = ScenarioPlan(self, session)
if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
deployment = CoreDeploymentWriter(self, self.scenarioPlan,
session)
deployment.add_deployment()
self.scenarioPlan.setAttribute('deployed', 'true')
def writexml(self, filename):
''' Commit to file
'''
self.scenarioPlan.coreSession.info("saving session XML file %s" % filename)
f = open(filename, "w")
Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", \
encoding="UTF-8")
f.close()
if self.scenarioPlan.coreSession.user is not None:
uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid
gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid
os.chown(filename, uid, gid)
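# Typical use (sketch; 'session' is assumed to be an existing core session object and
# the output path is hypothetical):
#   writer = CoreDocumentWriter1(session)
#   writer.writexml('/tmp/scenario.xml')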
class XmlElement(object):
''' The base class for all XML elements in the scenario plan. Includes
convenience functions.
'''
def __init__(self, document, parent, elementType):
self.document = document
self.parent = parent
self.baseEle = document.createElement("%s" % elementType)
if self.parent is not None:
self.parent.appendChild(self.baseEle)
def createElement(self, elementTag):
return self.document.createElement(elementTag)
def getTagName(self):
return self.baseEle.tagName
def createTextNode(self, nodeTag):
return self.document.createTextNode(nodeTag)
def appendChild(self, child):
if isinstance(child, XmlElement):
self.baseEle.appendChild(child.baseEle)
else:
self.baseEle.appendChild(child)
@staticmethod
def add_parameter(doc, parent, key, value):
if key and value:
parm = doc.createElement("parameter")
parm.setAttribute("name", str(key))
parm.appendChild(doc.createTextNode(str(value)))
parent.appendChild(parm)
def addParameter(self, key, value):
'''
Add a parameter to the xml element
'''
self.add_parameter(self.document, self, key, value)
def setAttribute(self, name, val):
self.baseEle.setAttribute(name, val)
def getAttribute(self, name):
return self.baseEle.getAttribute(name)
class NamedXmlElement(XmlElement):
''' The base class for all "named" xml elements. Named elements are
xml elements in the scenario plan that have an id and a name attribute.
'''
def __init__(self, scenPlan, parent, elementType, elementName):
XmlElement.__init__(self, scenPlan.document, parent, elementType)
self.scenPlan = scenPlan
self.coreSession = scenPlan.coreSession
elementPath = ''
self.id=None
if self.parent is not None and isinstance(self.parent, XmlElement) and self.parent.getTagName() != "scenario":
elementPath="%s/" % self.parent.getAttribute("id")
self.id = "%s%s" % (elementPath,elementName)
self.setAttribute("name", elementName)
self.setAttribute("id", self.id)
def addPoint(self, coreObj):
''' Add position to an object
'''
(x,y,z) = coreObj.position.get()
if x is None or y is None:
return
lat, lon, alt = self.coreSession.location.getgeo(x, y, z)
pt = self.createElement("point")
pt.setAttribute("type", "gps")
pt.setAttribute("lat", "%s" % lat)
pt.setAttribute("lon", "%s" % lon)
if z:
pt.setAttribute("z", "%s" % alt)
self.appendChild(pt)
def createAlias(self, domain, valueStr):
''' Create an alias element for CORE specific information
'''
a = self.createElement("alias")
a.setAttribute("domain", "%s" % domain)
a.appendChild(self.createTextNode(valueStr))
return a
class ScenarioPlan(XmlElement):
''' Container class for ScenarioPlan.
'''
def __init__(self, document, session):
XmlElement.__init__(self, document, parent=document, elementType='scenario')
self.coreSession = session
self.setAttribute('version', '1.0')
self.setAttribute("name", "%s" % session.name)
self.setAttribute('xmlns', 'nmfPlan')
self.setAttribute('xmlns:CORE', 'coreSpecific')
self.setAttribute('compiled', 'true')
self.allChannelMembers = dict()
self.lastNetIdx = 0
self.addNetworks()
self.addDevices()
# XXX Do we need these?
#self.session.emane.setup() # not during runtime?
#self.addorigin()
self.addDefaultServices()
self.addSessionConfiguration()
def addNetworks(self):
''' Add networks in the session to the scenPlan.
'''
for net in self.coreSession.objs():
if not isinstance(net, nodes.PyCoreNet):
continue
if isinstance(net, nodes.CtrlNet):
continue
# Do not add switches and hubs that belong to another network
if isinstance(net, (nodes.SwitchNode, nodes.HubNode)):
if inOtherNetwork(net):
continue
try:
NetworkElement(self, self, net)
            except Exception:
if hasattr(net, "name") and net.name:
self.coreSession.warn('Unsupported net: %s' % net.name)
else:
self.coreSession.warn('Unsupported net: %s' % net.__class__.__name__)
def addDevices(self):
''' Add device elements to the scenario plan.
'''
for node in self.coreSession.objs():
if not isinstance(node, (nodes.PyCoreNode)):
continue
try:
DeviceElement(self, self, node)
            except Exception:
if hasattr(node, "name") and node.name:
self.coreSession.warn('Unsupported device: %s' % node.name)
else:
self.coreSession.warn('Unsupported device: %s' % node.__class__.__name__)
def addDefaultServices(self):
''' Add default services and node types to the ServicePlan.
'''
defaultservices = self.createElement("CORE:defaultservices")
        for nodetype in self.coreSession.services.defaultservices:
            defaults = self.coreSession.services.getdefaultservices(nodetype)
            spn = self.createElement("device")
            spn.setAttribute("type", nodetype)
defaultservices.appendChild(spn)
for svc in defaults:
s = self.createElement("service")
spn.appendChild(s)
s.setAttribute("name", str(svc._name))
if defaultservices.hasChildNodes():
self.appendChild(defaultservices)
def addSessionConfiguration(self):
''' Add CORE-specific session configuration XML elements.
'''
config = self.createElement("CORE:sessionconfig")
# origin: geolocation of cartesian coordinate 0,0,0
refgeo = self.coreSession.location.refgeo
origin = self.createElement("origin")
attrs = ("lat","lon","alt")
have_origin = False
for i in xrange(3):
if refgeo[i] is not None:
origin.setAttribute(attrs[i], str(refgeo[i]))
have_origin = True
if have_origin:
if self.coreSession.location.refscale != 1.0: # 100 pixels = refscale m
origin.setAttribute("scale100", str(self.coreSession.location.refscale))
if self.coreSession.location.refxyz != (0.0, 0.0, 0.0):
pt = self.createElement("point")
origin.appendChild(pt)
x,y,z = self.coreSession.location.refxyz
coordstxt = "%s,%s" % (x,y)
if z:
coordstxt += ",%s" % z
coords = self.createTextNode(coordstxt)
pt.appendChild(coords)
config.appendChild(origin)
# options
options = self.createElement("options")
defaults = self.coreSession.options.getdefaultvalues()
for i, (k, v) in enumerate(self.coreSession.options.getkeyvaluelist()):
if str(v) != str(defaults[i]):
XmlElement.add_parameter(self.document, options, k, v)
if options.hasChildNodes():
config.appendChild(options)
# hook scripts
hooks = self.createElement("hooks")
for state in sorted(self.coreSession._hooks.keys()):
for (filename, data) in self.coreSession._hooks[state]:
hook = self.createElement("hook")
hook.setAttribute("name", filename)
hook.setAttribute("state", str(state))
txt = self.createTextNode(data)
hook.appendChild(txt)
hooks.appendChild(hook)
if hooks.hasChildNodes():
config.appendChild(hooks)
# metadata
meta = self.createElement("metadata")
for (k, v) in self.coreSession.metadata.items():
XmlElement.add_parameter(self.document, meta, k, v)
if meta.hasChildNodes():
config.appendChild(meta)
if config.hasChildNodes():
self.appendChild(config)
class NetworkElement(NamedXmlElement):
def __init__(self, scenPlan, parent, netObj):
''' Add one PyCoreNet object as one network XML element.
'''
elementName = self.getNetworkName(scenPlan, netObj)
NamedXmlElement.__init__(self, scenPlan, parent, "network", elementName)
self.scenPlan = scenPlan
self.addPoint(netObj)
netType = None
if isinstance(netObj, (nodes.WlanNode, nodes.EmaneNode)):
netType = Attrib.NetType.WIRELESS
elif isinstance(netObj, (nodes.SwitchNode, nodes.HubNode,
nodes.PtpNet, nodes.TunnelNode)):
netType = Attrib.NetType.ETHERNET
else:
netType ="%s" % netObj.__class__.__name__
typeEle = self.createElement("type")
typeEle.appendChild(self.createTextNode(netType))
self.appendChild(typeEle)
# Gather all endpoints belonging to this network
self.endpoints = getEndpoints(netObj)
# Special case for a network of switches and hubs
createAlias = True
self.l2devices = []
if isinstance(netObj, (nodes.SwitchNode, nodes.HubNode)):
createAlias = False
self.appendChild(typeEle)
self.addL2Devices(netObj)
if createAlias:
a = self.createAlias(Attrib.Alias.ID, "%d" % int(netObj.objid))
self.appendChild(a)
# XXXX TODO: Move this to channel?
# key used with tunnel node
if hasattr(netObj, 'grekey') and netObj.grekey is not None:
a = self.createAlias("COREGREKEY", "%s" % netObj.grekey)
self.appendChild(a)
self.addNetMembers(netObj)
self.addChannels(netObj)
presentationEle = self.createElement("CORE:presentation")
addPresentationEle = False
if netObj.icon and not netObj.icon.isspace():
presentationEle.setAttribute("icon", netObj.icon)
addPresentationEle = True
if netObj.canvas:
presentationEle.setAttribute("canvas", str(netObj.canvas))
addPresentationEle = True
if addPresentationEle:
self.appendChild(presentationEle)
def getNetworkName(self, scenPlan, netObj):
''' Determine the name to use for this network element
'''
if isinstance(netObj, (nodes.PtpNet, nodes.TunnelNode)):
name = "net%s" % scenPlan.lastNetIdx
scenPlan.lastNetIdx += 1
elif netObj.name:
name = str(netObj.name) # could use net.brname for bridges?
elif isinstance(netObj, (nodes.SwitchNode, nodes.HubNode)):
name = "lan%s" % netObj.objid
else:
name = ''
return name
def addL2Devices(self, netObj):
''' Add switches and hubs
'''
# Add the netObj as a device
self.l2devices.append(DeviceElement(self.scenPlan, self, netObj))
# Add downstream switches/hubs
l2devs = []
neweps = []
for ep in self.endpoints:
if ep.type and ep.net.objid != netObj.objid:
l2s, eps = getDowmstreamL2Devices(ep.net)
l2devs.extend(l2s)
neweps.extend(eps)
for l2dev in l2devs:
self.l2devices.append(DeviceElement(self.scenPlan, self, l2dev))
self.endpoints.extend(neweps)
# XXX: Optimize later
def addNetMembers(self, netObj):
''' Add members to a network XML element.
'''
for ep in self.endpoints:
if ep.type:
MemberElement(self.scenPlan, self, referencedType=ep.type, referencedId=ep.id)
if ep.l2devport:
MemberElement(self.scenPlan,
self,
referencedType=Attrib.MembType.INTERFACE,
referencedId="%s/%s" % (self.id,ep.l2devport))
# XXX Revisit this
# Create implied members given the network type
if isinstance(netObj, nodes.TunnelNode):
MemberElement(self.scenPlan,
self,
referencedType=Attrib.MembType.TUNNEL,
referencedId="%s/%s" % (netObj.name, netObj.name))
# XXX: Optimize later
def addChannels(self, netObj):
''' Add channels to a network XML element
'''
if isinstance(netObj, (nodes.WlanNode, nodes.EmaneNode)):
modelconfigs = netObj.session.mobility.getmodels(netObj)
modelconfigs += netObj.session.emane.getmodels(netObj)
chan = None
for (model, conf) in modelconfigs:
# Handle mobility parameters below
if model._type == coreapi.CORE_TLV_REG_MOBILITY:
continue
# Create the channel
if chan is None:
name = "wireless"
chan = ChannelElement(self.scenPlan, self, netObj,
channelType=model._name,
channelName=name,
channelDomain="CORE")
# Add wireless model parameters
for i, key in enumerate(model.getnames()):
value = conf[i]
if value is not None:
chan.addParameter(key, model.valueof(key, conf))
for (model, conf) in modelconfigs:
if model._type == coreapi.CORE_TLV_REG_MOBILITY:
# Add wireless mobility parameters
mobility = XmlElement(self.scenPlan, chan, "CORE:mobility")
# Add a type child
typeEle = self.createElement("type")
typeEle.appendChild(self.createTextNode(model._name))
mobility.appendChild(typeEle)
for i, key in enumerate(model.getnames()):
value = conf[i]
if value is not None:
mobility.addParameter(key, value)
# Add members to the channel
if chan is not None:
chan.addChannelMembers(self.endpoints)
self.appendChild(chan.baseEle)
elif isinstance(netObj, nodes.PtpNet) :
if len(self.endpoints) < 2:
if len(self.endpoints) == 1:
self.coreSession.warn('Pt2Pt network with only 1 endpoint: %s' % self.endpoints[0].id)
else:
self.coreSession.warn('Pt2Pt network with no endpoints encountered in %s' % netObj.name)
return
name = "chan%d" % (0)
chan = ChannelElement(self.scenPlan, self, netObj,
channelType=Attrib.NetType.ETHERNET,
channelName=name)
# Add interface parameters
if self.endpoints[0].params != self.endpoints[1].params:
self.coreSession.warn('Pt2Pt Endpoint parameters do not match in %s' % netObj.name)
for key, value in self.endpoints[0].params:
# XXX lifted from original addnetem function. revisit this.
# default netem parameters are 0 or None
if value is None or value == 0:
continue
if key == "has_netem" or key == "has_tbf":
continue
chan.addParameter(key, value)
# Add members to the channel
chan.addChannelMembers(self.endpoints)
self.appendChild(chan)
elif isinstance(netObj, (nodes.SwitchNode,
nodes.HubNode, nodes.TunnelNode)):
cidx=0
channels = []
for ep in self.endpoints:
# Create one channel member per ep
if ep.type:
name = "chan%d" % (cidx)
chan = ChannelElement(self.scenPlan, self, netObj,
channelType=Attrib.NetType.ETHERNET,
channelName=name)
# Add interface parameters
for key, value in ep.params:
# XXX lifted from original addnetem function. revisit this.
# default netem parameters are 0 or None
if value is None or value == 0:
continue
if key == "has_netem" or key == "has_tbf":
continue
chan.addParameter(key, value)
# Add members to the channel
chan.addChannelMembers(ep)
channels.append(chan)
cidx += 1
for chan in channels:
self.appendChild(chan)
class DeviceElement(NamedXmlElement):
''' A device element in the scenario plan.
'''
def __init__(self, scenPlan, parent, devObj):
''' Add a PyCoreNode object as a device element.
'''
devType = None
coreDevType = None
if hasattr(devObj, "type") and devObj.type:
coreDevType = devObj.type
if devObj.type == Attrib.NodeType.ROUTER:
devType = Attrib.DevType.ROUTER
elif devObj.type == Attrib.NodeType.MDR:
devType = Attrib.DevType.ROUTER
elif devObj.type == Attrib.NodeType.HOST:
devType = Attrib.DevType.HOST
elif devObj.type == Attrib.NodeType.PC:
devType = Attrib.DevType.HOST
elif devObj.type == Attrib.NodeType.RJ45:
devType = Attrib.DevType.HOST
nodeId = "EMULATOR-HOST"
else:
# Default custom types (defined in ~/.core/nodes.conf) to HOST
devType = Attrib.DevType.HOST
if devType is None:
if isinstance(devObj, nodes.HubNode):
devType = Attrib.DevType.HUB
elif isinstance(devObj, nodes.SwitchNode):
devType = Attrib.DevType.SWITCH
if devType is None:
            raise Exception("unsupported device type: %s" % devObj.__class__.__name__)
NamedXmlElement.__init__(self, scenPlan, parent, devType, devObj.name)
if coreDevType is not None:
typeEle = self.createElement("type")
typeEle.setAttribute("domain", "CORE")
typeEle.appendChild(self.createTextNode("%s" % coreDevType))
self.appendChild(typeEle)
self.interfaces = []
self.addInterfaces(devObj)
alias = self.createAlias(Attrib.Alias.ID, "%s" % devObj.objid)
self.appendChild(alias)
self.addPoint(devObj)
self.addServices(devObj)
presentationEle = self.createElement("CORE:presentation")
addPresentationEle = False
if devObj.icon and not devObj.icon.isspace():
presentationEle.setAttribute("icon", devObj.icon)
addPresentationEle = True
if devObj.canvas:
presentationEle.setAttribute("canvas", str(devObj.canvas))
addPresentationEle = True
if addPresentationEle:
self.appendChild(presentationEle)
def addInterfaces(self, devObj):
''' Add interfaces to a device element.
'''
idx=0
for ifcObj in devObj.netifs(sort=True):
if ifcObj.net and isinstance(ifcObj.net, nodes.CtrlNet):
continue
if isinstance(devObj, nodes.PyCoreNode):
ifcEle = InterfaceElement(self.scenPlan, self, devObj, ifcObj)
            else: # isinstance(devObj, (nodes.HubNode, nodes.SwitchNode)):
ifcEle = InterfaceElement(self.scenPlan, self, devObj, ifcObj, idx)
idx += 1
netmodel = None
if ifcObj.net:
if hasattr(ifcObj.net, "model"):
netmodel = ifcObj.net.model
if ifcObj.mtu and ifcObj.mtu != 1500:
ifcEle.setAttribute("mtu", "%s" % ifcObj.mtu)
# The interfaces returned for Switches and Hubs are the interfaces of the nodes connected to them.
# The addresses are for those interfaces. Don't include them here.
if isinstance(devObj, nodes.PyCoreNode):
# could use ifcObj.params, transport_type
ifcEle.addAddresses(ifcObj)
# per-interface models
# XXX Remove???
if netmodel and netmodel._name[:6] == "emane_":
cfg = self.coreSession.emane.getifcconfig(devObj.objid, netmodel._name,
None, ifcObj)
if cfg:
ifcEle.addModels(((netmodel, cfg),) )
self.interfaces.append(ifcEle)
def addServices(self, devObj):
''' Add services and their customizations to the ServicePlan.
'''
if not hasattr(devObj, "services") :
return
if len(devObj.services) == 0:
return
defaults = self.coreSession.services.getdefaultservices(devObj.type)
if devObj.services == defaults:
return
spn = self.createElement("CORE:services")
spn.setAttribute("name", devObj.name)
self.appendChild(spn)
for svc in devObj.services:
s = self.createElement("service")
spn.appendChild(s)
s.setAttribute("name", str(svc._name))
s.setAttribute("startup_idx", str(svc._startindex))
if svc._starttime != "":
s.setAttribute("start_time", str(svc._starttime))
# only record service names if not a customized service
if not svc._custom:
continue
s.setAttribute("custom", str(svc._custom))
addelementsfromlist(self, s, svc._dirs, "directory", "name")
for fn in svc._configs:
if len(fn) == 0:
continue
f = self.createElement("file")
f.setAttribute("name", fn)
# all file names are added to determine when a file has been deleted
s.appendChild(f)
data = self.coreSession.services.getservicefiledata(svc, fn)
if data is None:
# this includes only customized file contents and skips
# the auto-generated files
continue
txt = self.createTextNode("\n" + data)
f.appendChild(txt)
addtextelementsfromlist(self, s, svc._startup, "command",
(("type","start"),))
addtextelementsfromlist(self, s, svc._shutdown, "command",
(("type","stop"),))
addtextelementsfromlist(self, s, svc._validate, "command",
(("type","validate"),))
class ChannelElement(NamedXmlElement):
''' A channel element in the scenario plan
'''
def __init__(self, scenPlan, parent, netObj, channelType, channelName, channelDomain=None):
        '''
        Create a channel element and append a member child referencing this channel element
        in the parent element.
        '''
        NamedXmlElement.__init__(self, scenPlan, parent, "channel", channelName)
# Create a member element for this channel in the parent
MemberElement(self.scenPlan,
parent,
referencedType=Attrib.MembType.CHANNEL,
referencedId=self.id)
# Add a type child
typeEle = self.createElement("type")
if channelDomain is not None:
typeEle.setAttribute("domain", "%s" % channelDomain)
typeEle.appendChild(self.createTextNode(channelType))
self.appendChild(typeEle)
def addChannelMembers(self, endpoints):
'''
Add network channel members referencing interfaces in the channel
'''
if isinstance(endpoints, list):
# A list of endpoints is given. Create one channel member per endpoint
idx = 0
for ep in endpoints:
self.addChannelMember(ep.type, ep.id, idx)
idx += 1
else:
# A single endpoint is given. Create one channel member for the endpoint,
# and if the endpoint is associated with a Layer 2 device port, add the
# port as a second member
ep = endpoints
self.addChannelMember(ep.type, ep.id, 0)
if ep.l2devport is not None:
memId = "%s/%s" % (self.parent.getAttribute("id"), ep.l2devport)
self.addChannelMember(ep.type, memId, 1)
def addChannelMember(self, memIfcType, memIfcId, memIdx):
'''
add a member to a given channel
'''
m = MemberElement(self.scenPlan,
self,
referencedType=memIfcType,
referencedId=memIfcId,
index=memIdx)
self.scenPlan.allChannelMembers[memIfcId] = m
class InterfaceElement(NamedXmlElement):
'''
A network interface element
'''
def __init__(self, scenPlan, parent, devObj, ifcObj, ifcIdx=None):
'''
        Create a network interface element with references to the channel in which this
        interface is used.
'''
elementName=None
if ifcIdx is not None:
elementName = "e%d" % ifcIdx
else:
elementName = ifcObj.name
NamedXmlElement.__init__(self, scenPlan, parent, "interface", elementName)
self.ifcObj = ifcObj
self.addChannelReference()
def addChannelReference(self):
'''
Add a reference to the channel that uses this interface
'''
try:
cm = self.scenPlan.allChannelMembers[self.id]
if cm is not None:
ch = cm.baseEle.parentNode
if ch is not None:
net = ch.parentNode
if net is not None:
MemberElement(self.scenPlan,
self,
referencedType=Attrib.MembType.CHANNEL,
referencedId=ch.getAttribute("id"),
index=int(cm.getAttribute("index")))
MemberElement(self.scenPlan,
self,
referencedType=Attrib.MembType.NETWORK,
referencedId=net.getAttribute("id"))
except KeyError:
pass # Not an error. This occurs when an interface belongs to a switch or a hub within a network and the channel is yet to be defined
def addAddresses(self, ifcObj):
'''
Add MAC and IP addresses to interface XML elements.
'''
if ifcObj.hwaddr:
h = self.createElement("address")
self.appendChild(h)
h.setAttribute("type", "mac")
htxt = self.createTextNode("%s" % ifcObj.hwaddr)
h.appendChild(htxt)
for addr in ifcObj.addrlist:
a = self.createElement("address")
self.appendChild(a)
(ip, sep, mask) = addr.partition('/')
# mask = int(mask) XXX?
if isIPv4Address(ip):
a.setAttribute("type", "IPv4")
else:
a.setAttribute("type", "IPv6")
# a.setAttribute("type", )
atxt = self.createTextNode("%s" % addr)
a.appendChild(atxt)
# XXX Remove?
def addModels(self, configs):
'''
Add models from a list of model-class, config values tuples.
'''
for (m, conf) in configs:
modelEle = self.createElement("model")
modelEle.setAttribute("name", m._name)
typeStr = "wireless"
if m._type == coreapi.CORE_TLV_REG_MOBILITY:
typeStr = "mobility"
modelEle.setAttribute("type", typeStr)
for i, k in enumerate(m.getnames()):
key = self.createElement(k)
value = conf[i]
if value is None:
value = ""
key.appendChild(self.createTextNode("%s" % value))
modelEle.appendChild(key)
self.appendChild(modelEle)
class MemberElement(XmlElement):
'''
    Member elements are references to other elements in the scenario's network plan.
    They are used in networks to reference channels, in channels to reference interfaces,
    and in interfaces to reference networks/channels. Member elements allow bi-directional
    traversal of network plan components.
'''
def __init__(self, scenPlan, parent, referencedType, referencedId, index=None):
'''
Create a member element
'''
XmlElement.__init__(self, scenPlan.document, parent, "member")
self.setAttribute("type", "%s" % referencedType)
        # See the 'Understanding the Network Modeling Framework' document.
if index is not None:
self.setAttribute("index", "%d" % index)
self.appendChild(self.createTextNode("%s" % referencedId))
#
# =======================================================================================
# Helpers
# =======================================================================================
def getEndpoint(netObj, ifcObj):
'''
Create an Endpoint object given the network and the interface of interest
'''
ep = None
l2devport=None
# if ifcObj references an interface of a node and is part of this network
if ifcObj.net.objid == netObj.objid and hasattr(ifcObj,'node') and ifcObj.node:
params = ifcObj.getparams()
if isinstance(ifcObj.net, (nodes.HubNode, nodes.SwitchNode)):
l2devport="%s/e%d" % (ifcObj.net.name, ifcObj.net.getifindex(ifcObj))
ep = Endpoint(netObj,
ifcObj,
type = Attrib.MembType.INTERFACE,
id="%s/%s" % (ifcObj.node.name, ifcObj.name),
l2devport=l2devport,
params=params)
# else if ifcObj references another node and is connected to this network
elif hasattr(ifcObj,"othernet"):
if ifcObj.othernet.objid == netObj.objid:
# #hack used for upstream parameters for link between switches
# #(see LxBrNet.linknet())
ifcObj.swapparams('_params_up')
params = ifcObj.getparams()
ifcObj.swapparams('_params_up')
owner = ifcObj.net
l2devport="%s/e%d" % (ifcObj.othernet.name, ifcObj.othernet.getifindex(ifcObj))
# Create the endpoint.
            # XXX The interface index might not match what is shown in the GUI. For switches and
            # hubs, the GUI assigns its own index but doesn't pass it to the daemon and vice versa.
            # The GUI stores its index in the IMN file, which it reads and writes without daemon
            # intervention. Fix this!
ep = Endpoint(owner,
ifcObj,
type = Attrib.MembType.INTERFACE,
id="%s/%s/e%d" % (netObj.name, owner.name, owner.getifindex(ifcObj)),
l2devport=l2devport,
params=params)
# else this node has an interface that belongs to another network
# i.e. a switch/hub interface connected to another switch/hub and CORE has the other switch/hub
# as the containing network
else :
ep = Endpoint(netObj, ifcObj,type=None, id=None, l2devport=None, params=None)
return ep
def getEndpoints(netObj):
'''
Gather all endpoints of the given network
'''
# Get all endpoints
endpoints = []
# XXX TODO: How to represent physical interfaces.
#
# NOTE: The following code works except it would be missing physical (rj45) interfaces from Pt2pt links
# TODO: Fix data in net.netifs to include Pt2Pt physical interfaces
#
# Iterate through all the nodes in the scenario, then iterate through all the interface for each node,
# and check if the interface is connected to this network.
for ifcObj in netObj.netifs(sort=True):
try:
ep = getEndpoint(netObj, ifcObj)
if ep is not None:
endpoints.append(ep)
except Exception:
pass
return endpoints
def getDowmstreamL2Devices(netObj):
'''
Helper function for getting a list of all downstream layer 2 devices from the given netObj
'''
l2devObjs = [netObj]
allendpoints = []
myendpoints = getEndpoints(netObj)
allendpoints.extend(myendpoints)
for ep in myendpoints:
if ep.type and ep.net.objid != netObj.objid:
l2s, eps = getDowmstreamL2Devices(ep.net)
l2devObjs.extend(l2s)
allendpoints.extend(eps)
return l2devObjs, allendpoints
def getAllNetworkInterfaces(session):
'''
    Gather all network interfaces in the session
'''
netifs = []
for node in session.objs():
for netif in node.netifs(sort=True):
if netif not in netifs:
netifs.append(netif)
return netifs
def inOtherNetwork(netObj):
'''
Determine if CORE considers a given network object to be part of another network.
    Note: CORE considers layer 2 devices to be their own networks. However, if an l2 device
    is connected to another device, it is possible that one of its ports belongs to the other
l2 device's network (thus, "othernet").
'''
for netif in netObj.netifs(sort=True):
if hasattr(netif,"othernet"):
if netif.othernet.objid != netObj.objid:
return True
return False
|
|
# -*- encoding: utf-8 -*-
import re
import copy
import operator
from collections import OrderedDict
try:
from functools import reduce
except ImportError:
pass
from django.core.exceptions import FieldDoesNotExist
from django.template.loader import render_to_string
from django.db.models import QuerySet
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
from .compat import python_2_unicode_compatible
import six
from .exceptions import ColumnError, SkipRecord
from .columns import (Column, TextColumn, DateColumn, DateTimeColumn, BooleanColumn, IntegerColumn,
FloatColumn, DisplayColumn, CompoundColumn, get_column_for_modelfield)
from .utils import (OPTION_NAME_MAP, MINIMUM_PAGE_LENGTH, contains_plural_field, split_terms,
resolve_orm_path)
from .cache import DEFAULT_CACHE_TYPE, cache_types, get_cache_key, cache_data, get_cached_data
def pretty_name(name):
if not name:
return ''
return name[0].capitalize() + name[1:]
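# Example: pretty_name('publication_date') returns 'Publication_date'; only the first
# character is upper-cased and the rest of the name is left untouched.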
# Borrowed from the Django forms implementation
def columns_for_model(model, fields=None, exclude=None, labels=None, processors=None,
unsortable=None, hidden=None):
field_list = []
opts = model._meta
for f in sorted(opts.fields):
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
column_class = get_column_for_modelfield(f)
if column_class is None:
raise ColumnError("Unhandled model field %r." % (f,))
if labels and f.name in labels:
label = labels[f.name]
else:
label = f.verbose_name
if processors and f.name in processors:
processor = processors[f.name]
else:
processor = None
if unsortable and f.name in unsortable:
sortable = False
else:
sortable = True
if hidden and f.name in hidden:
visible = False
else:
visible = True
label = (labels or {}).get(f.name, pretty_name(f.verbose_name))
column = column_class(sources=[f.name], label=label, processor=processor, sortable=sortable,
visible=visible)
column.name = f.name
field_list.append((f.name, column))
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if (not exclude) or (exclude and f not in exclude)]
)
return field_dict
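# Sketch of typical use (model and field names are hypothetical): build Column instances
# for a subset of an Author model's fields, overriding one label.
#   columns = columns_for_model(Author, fields=['name', 'birth_date'],
#                               labels={'name': 'Author name'})
#   # -> OrderedDict mapping 'name' and 'birth_date' to configured Column instances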
# Borrowed from the Django forms implementation
def get_declared_columns(bases, attrs, with_base_columns=True):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.
If 'with_base_columns' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions
"""
local_columns = [
(column_name, attrs.pop(column_name))
for column_name, obj in list(six.iteritems(attrs))
if isinstance(obj, Column)
]
local_columns.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's columns.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of columns.
if with_base_columns:
for base in bases[::-1]:
if hasattr(base, 'base_columns'):
local_columns = list(six.iteritems(base.base_columns)) + local_columns
else:
for base in bases[::-1]:
if hasattr(base, 'declared_columns'):
local_columns = list(six.iteritems(base.declared_columns)) + local_columns
return OrderedDict(local_columns)
class DatatableOptions(object):
"""
    Contains declarable options for a datatable, some of which can be manipulated by subsequent
requests by the user.
"""
def __init__(self, options=None):
# Non-mutable; server's declared preference is final
self.model = getattr(options, 'model', None)
self.columns = getattr(options, 'columns', None) # table headers
self.exclude = getattr(options, 'exclude', None)
self.search_fields = getattr(options, 'search_fields', None) # extra searchable ORM fields
self.unsortable_columns = getattr(options, 'unsortable_columns', None)
self.hidden_columns = getattr(options, 'hidden_columns', None) # generated, but hidden
self.labels = getattr(options, 'labels', None)
self.processors = getattr(options, 'processors', None)
self.request_method = getattr(options, 'request_method', 'GET')
self.structure_template = getattr(
options, 'structure_template', "datatableview/default_structure.html")
self.footer = getattr(options, 'footer', False)
self.result_counter_id = getattr(options, 'result_counter_id', 'id_count')
# Non-mutable; server behavior customization
self.cache_type = getattr(options, 'cache_type', cache_types.NONE)
self.cache_queryset_count = getattr(options, 'cache_queryset_count', False)
# Mutable by the request
self.ordering = getattr(options, 'ordering', None) # override to Model._meta.ordering
self.page_length = getattr(options, 'page_length', 25) # length of a single result page
default_options = DatatableOptions()
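# For reference, the options read above normally come from a Datatable subclass's inner
# Meta class (sketch with hypothetical model and field names):
#   class BookDatatable(Datatable):
#       class Meta:
#           model = Book
#           columns = ['title', 'published']
#           labels = {'published': 'Published on'}
#           processors = {'published': 'format_published'}
#           page_length = 50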
class DatatableMetaclass(type):
"""
Each declared Datatable object inspects its declared "fields" in order to facilitate an
inheritance system resembling the django.forms system. Except for our custom Meta options that
offer field options ('labels', 'processors', etc), this code is essentially a clone of the
django.forms strategy.
"""
def __new__(cls, name, bases, attrs):
declared_columns = get_declared_columns(bases, attrs, with_base_columns=False)
new_class = super(DatatableMetaclass, cls).__new__(cls, name, bases, attrs)
opts = new_class._meta = new_class.options_class(getattr(new_class, 'Meta', None))
if opts.model:
columns = columns_for_model(opts.model, opts.columns, opts.exclude, opts.labels,
opts.processors, opts.unsortable_columns, opts.hidden_columns)
none_model_columns = [k for k, v in six.iteritems(columns) if not v]
missing_columns = set(none_model_columns) - set(declared_columns.keys())
for name, column in declared_columns.items():
column.name = name
# if not column.sources:
# column.sources = [name]
if not column.label:
try:
field = resolve_orm_path(opts.model, name)
except FieldDoesNotExist:
label = name
else:
label = field.verbose_name
column.label = pretty_name(label)
# Normalize declared 'search_fields' to Column instances
if isinstance(opts.search_fields, dict):
# Turn a dictionary of {name: ColumnClass} to just a list of classes.
# If only the column class reference is given instead of an instance, instantiate
# the object first.
search_fields = []
for name, column in opts.search_fields.items():
if callable(column):
column = column(sources=[name])
search_fields.append(column)
opts.search_fields = search_fields
elif opts.search_fields is None:
opts.search_fields = []
else:
opts.search_fields = list(opts.search_fields)
for i, column in enumerate(opts.search_fields):
# Build a column object
if isinstance(column, six.string_types):
name = column
field = resolve_orm_path(opts.model, name)
column = get_column_for_modelfield(field)
opts.search_fields[i] = column(sources=[name])
columns.update(declared_columns)
else:
columns = declared_columns
missing_columns = []
new_class.declared_columns = declared_columns
new_class.base_columns = columns
new_class.missing_columns = missing_columns
return new_class
@python_2_unicode_compatible
class Datatable(six.with_metaclass(DatatableMetaclass)):
"""
Declaration container for a clientside datatable, containing an optional Meta inner class,
class-level field declarations, and callbacks for filtering and post-processing values requested
by the client.
Client options sent over AJAX GET parameters will override the settings given in the inner Meta
class.
This object can be sent to a template context and rendered there in order to generate an
annotated HTML frame for the javascript to initialize.
"""
options_class = DatatableOptions
def __init__(self, object_list, url, view=None, callback_target=None, model=None,
query_config=None, force_distinct=True, **kwargs):
self.object_list = object_list
self.url = url
self.view = view
self.forward_callback_target = callback_target
self.model = model or self._meta.model
if self.model is None and hasattr(object_list, 'model'):
self.model = object_list.model
if query_config is None:
query_config = {}
self.query_config = query_config
self.columns = copy.deepcopy(self.base_columns)
self._force_distinct = force_distinct
self.total_initial_record_count = None
self.unpaged_record_count = None
def configure(self):
"""
Combines (in order) the declared/inherited inner Meta, any view options, and finally any
valid AJAX GET parameters from client modifications to the data they see.
"""
if hasattr(self, '_configured'):
return
self.resolve_virtual_columns(*tuple(self.missing_columns))
self.config = self.normalize_config(self._meta.__dict__, self.query_config)
self.config['column_searches'] = {}
for i, name in enumerate(self.columns.keys()):
column_search = self.query_config.get(OPTION_NAME_MAP['search_column'] % i, None)
if column_search:
self.config['column_searches'][name] = column_search
column_order = list(self.columns.keys())
if self.config['ordering']:
for i, name in enumerate(self.config['ordering']):
column_name = name.lstrip('-+')
try:
index = column_order.index(column_name)
except ValueError:
# It is important to ignore a bad ordering name, since the model.Meta may
# specify a field name that is not present on the datatable columns list.
continue
self.columns[column_name].sort_priority = i
self.columns[column_name].sort_direction = 'desc' if name[0] == '-' else 'asc'
self.columns[column_name].index = index
self._configured = True
# Client request configuration mergers
def normalize_config(self, config, query_config):
"""
Merge the declared configuration with whatever valid query parameters are found from the
client's AJAX request.
"""
# Core options, not modifiable by client updates
if config['hidden_columns'] is None:
config['hidden_columns'] = []
if config['search_fields'] is None:
config['search_fields'] = []
if config['unsortable_columns'] is None:
config['unsortable_columns'] = []
config['search'] = self.normalize_config_search(config, query_config)
config['start_offset'] = self.normalize_config_start_offset(config, query_config)
config['page_length'] = self.normalize_config_page_length(config, query_config)
config['ordering'] = self.normalize_config_ordering(config, query_config)
self._ordering_columns = self.ensure_ordering_columns(config['ordering'])
return config
def normalize_config_search(self, config, query_config):
terms_string = query_config.get(OPTION_NAME_MAP['search'], '').strip()
return set(split_terms(terms_string))
def normalize_config_start_offset(self, config, query_config):
try:
start_offset = query_config.get(OPTION_NAME_MAP['start_offset'], 0)
start_offset = int(start_offset)
except ValueError:
start_offset = 0
else:
if start_offset < 0:
start_offset = 0
return start_offset
def normalize_config_page_length(self, config, query_config):
try:
page_length = query_config.get(OPTION_NAME_MAP['page_length'], config['page_length'])
page_length = int(page_length)
except ValueError:
page_length = config['page_length']
else:
if page_length == -1: # dataTables' way of asking for all items, no pagination
pass
elif page_length < MINIMUM_PAGE_LENGTH:
page_length = MINIMUM_PAGE_LENGTH
return page_length
def normalize_config_ordering(self, config, query_config):
default_ordering = config['ordering']
if default_ordering is None and config['model']:
default_ordering = config['model']._meta.ordering
sort_declarations = [k for k in query_config if re.match(r'^order\[\d+\]\[column\]$', k)]
# Default sorting from view or model definition
if len(sort_declarations) == 0:
return default_ordering
ordering = []
columns_list = list(self.columns.values())
for sort_queue_i in range(len(columns_list)):
try:
column_index = int(query_config.get(
OPTION_NAME_MAP['sort_column'] % sort_queue_i, ''))
except ValueError:
continue
column = columns_list[column_index]
# Reject requests for unsortable columns
if column.name in config['unsortable_columns']:
continue
sort_direction = query_config.get(
OPTION_NAME_MAP['sort_column_direction'] % sort_queue_i, None)
if sort_direction == 'asc':
sort_modifier = ''
elif sort_direction == 'desc':
sort_modifier = '-'
else:
# Aggressively skip invalid specification
continue
ordering.append('%s%s' % (sort_modifier, column.name))
if not ordering:
return default_ordering
return ordering
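    # Example (hypothetical request values): with columns ('name', 'age', 'email') and
    # client parameters along the lines of
    #   {'order[0][column]': '2', 'order[0][dir]': 'desc',
    #    'order[1][column]': '0', 'order[1][dir]': 'asc'}
    # (the exact keys come from OPTION_NAME_MAP), the normalized ordering is ['-email', 'name'].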
def ensure_ordering_columns(self, ordering_names):
if ordering_names is None:
return {}
# Normalize declared 'ordering' to Column instances
ordering_columns = {}
for i, name in enumerate(ordering_names):
if name[0] in '+-':
name = name[1:]
if name not in self.columns:
field = resolve_orm_path(self.model, name)
column = get_column_for_modelfield(field)
ordering_columns[name] = column(sources=[name])
return ordering_columns
def resolve_virtual_columns(self, *names):
"""
Called with ``*args`` from the Meta.columns declaration that don't match the model's known
fields. This method can inspect these names and decide what to do with them in special
scenarios, but by default, they are simply raised in an exception to notify the developer of
an apparent configuration error.
"""
if names:
raise ColumnError("Unknown column name(s): %r" % (names,))
# Reflection methods for wrapped columns
def get_ordering_splits(self):
"""
Returns a 2-tuple of database-sortable and non-database-sortable column names. The first
list ends when the first non-db column is found. It is therefore possible that
``virtual_fields`` contains subsequent real db-backed fields, but because arrangement of the
ordering fields matters, we can't respect those until manual ordering has been done on the
intervening non-db fields.
"""
if self.config['ordering'] is None:
return [], []
db_fields = []
virtual_fields = []
i = 0
for i, name in enumerate(self.config['ordering']):
if name[0] in '+-':
name = name[1:]
if name in self.columns:
column = self.columns[name]
else:
column = self._ordering_columns[name]
if not column.get_db_sources(self.model):
break
else:
i = len(self.config['ordering'])
return self.config['ordering'][:i], self.config['ordering'][i:]
def get_db_splits(self):
""" Legacy utility for fetching the database columns and non-database columns. """
db_fields = []
virtual_fields = []
for name, column in self.columns.items():
if column.get_db_sources():
db_fields.append(name)
else:
virtual_fields.append(name)
return db_fields, virtual_fields
# Data retrieval
def will_load_from_cache(self, **kwargs):
"""
Returns a hint for external code concerning the presence of cache data for the given kwargs.
See :py:meth:`.get_cache_key_kwargs` for information concerning the kwargs you must send for
this hint to be accurate.
"""
cached_data = self.get_cached_data(datatable_class=self.__class__, **kwargs)
        return cached_data is not None
def get_cache_key_kwargs(self, view=None, user=None, **kwargs):
"""
Returns the dictionary of kwargs that will be sent to :py:meth:`.get_cache_key` in order to
generate a deterministic cache key.
``datatable_class``, ``view``, and ``user`` are returned by default, the user being looked
up on the view's ``request`` attribute.
Override this classmethod in order to add or remove items from the returned dictionary if
you need a more specific or less specific cache key.
"""
# Try to get user information if 'user' param is missing
if hasattr(view, 'request') and not user:
user = view.request.user
kwargs.update({
'datatable_class': self.__class__,
'view': view,
'user': user,
})
return kwargs
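    # Sketch of a subclass override (hypothetical) that adds the request language to the
    # cache key so per-locale data is cached separately:
    #   class MyDatatable(Datatable):
    #       def get_cache_key_kwargs(self, view=None, user=None, **kwargs):
    #           kwargs = super(MyDatatable, self).get_cache_key_kwargs(
    #               view=view, user=user, **kwargs)
    #           kwargs['language'] = getattr(view.request, 'LANGUAGE_CODE', None)
    #           return kwargs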
def get_cache_key(self, **kwargs):
"""
Returns the full cache key used for object_list data handled by this datatable class.
``settings.DATATABLEVIEW_CACHE_PREFIX`` will be prepended to this value.
The kwargs sent guarantee a deterministic cache key between requests.
``view`` and ``user`` are special kwargs that the caching system provides by default. The
view instance is inspected for its ``__module__.__name__`` string, and the user for its
``pk``.
All other kwargs are hashed and appended to the cache key.
"""
return get_cache_key(**kwargs)
def get_cached_data(self, **kwargs):
""" Returns object_list data cached for the given kwargs. """
return get_cached_data(self, **kwargs)
def cache_data(self, data, **kwargs):
""" Caches object_list data for the given kwargs. """
cache_data(self, data=data, **kwargs)
def get_object_list(self):
"""
Returns a cached object list if configured and available. When no caching strategy is
enabled or if the cached item is expired, the original ``object_list`` is returned.
"""
# Initial object_list from constructor, before filtering or ordering.
object_list = self.object_list
# Consult cache, if enabled
cache_type = self.config['cache_type']
if cache_type == cache_types.DEFAULT:
cache_type = DEFAULT_CACHE_TYPE
if cache_type:
cache_kwargs = self.get_cache_key_kwargs(view=self.view)
cached_data = self.get_cached_data(**cache_kwargs)
# If no cache is available, simplify and store the original object_list
if cached_data is None:
cached_data = self.prepare_object_list_for_cache(cache_type, object_list)
self.cache_data(cached_data, **cache_kwargs)
object_list = self.expand_object_list_from_cache(cache_type, cached_data)
return object_list
def prepare_object_list_for_cache(self, cache_type, object_list):
"""
Pre-caching hook that must prepare ``object_list`` for the cache using the strategy
indicated by ``cache_type``, which is the table's ``Meta``
:py:attr:`~datatableview.datatables.Meta.cache_type` value.
When ``cache_type`` is ``SIMPLE``, the ``object_list`` is returned unmodified.
When ``PK_LIST`` is used, ``object_list`` is queried for the list of ``pk`` values and those
are returned instead.
"""
data = object_list
        # Create the simplest reproducible query for repeated operations between requests
# Note that 'queryset' cache_type is unhandled so that it passes straight through.
if cache_type == cache_types.PK_LIST:
model = object_list.model
data = tuple(object_list.values_list('pk', flat=True))
# Objects in any other type of data structure should be picklable for the cache backend
return data
def expand_object_list_from_cache(self, cache_type, cached_data):
"""
Deserializes the ``cached_data`` fetched from the caching backend, according to the
``cache_type`` strategy that was used to originally store it.
When ``cache_type`` is ``SIMPLE``, the ``cached_data`` is returned unmodified, since the
``object_list`` went into the cache unmodified.
When ``PK_LIST`` is used, ``cached_data`` is treated as a list of ``pk`` values and is used
to filter the model's default queryset to just those objects.
"""
if cache_type == cache_types.PK_LIST:
# Convert pk list back into queryset
data = self.model.objects.filter(pk__in=cached_data)
else:
# Straight passthrough of cached items
data = cached_data
return data
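# Editor's note: a rough illustration (not executed here) of the PK_LIST round
# trip performed by the two methods above, assuming ``datatable`` is a configured
# instance whose ``queryset`` variable is a model queryset:
#
#     cached = datatable.prepare_object_list_for_cache(
#         cache_types.PK_LIST, queryset)     # -> tuple of pk values
#     restored = datatable.expand_object_list_from_cache(
#         cache_types.PK_LIST, cached)       # -> model.objects.filter(pk__in=cached)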
def _get_current_page(self):
"""
If page_length is specified in the options or AJAX request, the result list is shortened to
the correct offset and length. Paged or not, the finalized object_list is then returned.
"""
# Narrow the results to the appropriate page length for serialization
if self.config['page_length'] != -1:
i_begin = self.config['start_offset']
i_end = self.config['start_offset'] + self.config['page_length']
object_list = self._records[i_begin:i_end]
else:
object_list = self._records
return object_list
def get_records(self):
"""
Calls :py:meth:`.populate_records` to apply searches and sorting to the object list, then
extracts the applicable page of results, calling :py:meth:`.get_record_data` for each result
in the page.
Returns the final list of processed results.
"""
if not hasattr(self, '_records'):
self.populate_records()
page_data = []
for obj in self._get_current_page():
try:
record_data = self.get_record_data(obj)
except SkipRecord:
pass
else:
page_data.append(record_data)
return page_data
def populate_records(self):
"""
Searches and sorts the original object list. Even though these operations do not themselves
generate queries, the original object list and the filtered object list will both be counted,
meaning that this method is guaranteed to run queries.
No paging will take place at this stage!
"""
if not hasattr(self, 'config'):
self.configure()
self._records = None
base_objects = self.get_object_list()
filtered_objects = self.search(base_objects)
filtered_objects = self.sort(filtered_objects)
self._records = filtered_objects
num_total, num_filtered = self.count_objects(base_objects, filtered_objects)
self.total_initial_record_count = num_total
self.unpaged_record_count = num_filtered
def count_objects(self, base_objects, filtered_objects):
"""
Calculates object totals for datatable footer. Returns a 2-tuple of counts for,
respectively, the total number of objects and the filtered number of objects.
Up to two ``COUNT`` queries may be issued. If you already have heavy backend queries, this
might add significant overhead to every AJAX fetch, such as those triggered by keystroke filters.
If ``Meta.cache_type`` is configured and ``Meta.cache_queryset_count`` is set to True, the
resulting counts will be stored in the caching backend.
"""
num_total = None
num_filtered = None
if isinstance(base_objects, QuerySet):
if self.config['cache_queryset_count']:
cache_kwargs = self.get_cache_key_kwargs(view=self.view, __num_total='__num_total')
num_total = self.get_cached_data(**cache_kwargs)
if num_total is None:
num_total = base_objects.count()
if self.config['cache_queryset_count']:
self.cache_data(num_total, **cache_kwargs)
else:
num_total = len(base_objects)
if len(self.config['search']) > 0:
if isinstance(filtered_objects, QuerySet):
num_filtered = filtered_objects.count()
else:
num_filtered = len(filtered_objects)
else:
num_filtered = num_total
return num_total, num_filtered
def search(self, queryset):
""" Performs db-only queryset searches. """
table_queries = []
searches = {}
# Add per-column searches where necessary
for name, term in self.config['column_searches'].items():
for term in set(split_terms(term)):
columns = searches.setdefault(term, {})
columns[name] = self.columns[name]
# Global search terms apply to all columns
for term in self.config['search']:
# NOTE: Allow global terms to overwrite identical queries that were single-column
searches[term] = self.columns.copy()
searches[term].update({None: column for column in self.config['search_fields']})
for term in searches.keys():
term_queries = []
for name, column in searches[term].items():
if name is None: # config.search_fields items
search_f = self._search_column
else:
search_f = getattr(self, 'search_%s' % (name,), self._search_column)
q = search_f(column, term)
if q is not None:
term_queries.append(q)
if term_queries:
table_queries.append(reduce(operator.or_, term_queries))
if table_queries:
q = reduce(operator.and_, table_queries)
queryset = queryset.filter(q)
return queryset.distinct()
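# Editor's note: the loop above resolves a per-column hook named
# ``search_<column name>`` before falling back to ``_search_column``. A
# hypothetical override (column name "author" assumed, ``Q`` from
# django.db.models) could look like:
#
#     def search_author(self, column, term):
#         # Return a Q object for this column and term, or None to skip it.
#         return Q(author__name__icontains=term)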
def _search_column(self, column, terms):
""" Requests search queries to be performed against the target column. """
return column.search(self.model, terms)
def sort(self, queryset):
"""
Performs db-only queryset sorts, then applies manual sorts if required.
"""
fields = []
db, virtual = self.get_ordering_splits()
for name in db:
sort_direction = ''
if name[0] in '+-':
sort_direction = name[0]
if sort_direction == '+':
sort_direction = ''
name = name[1:]
if name in self.columns:
column = self.columns[name]
else:
column = self._ordering_columns[name]
sources = column.get_sort_fields(self.model)
if sources:
fields.extend([(sort_direction + source) for source in sources])
object_list = queryset.order_by(*fields)
# When sorting a plural relationship field, we get duplicate rows for each item on the other
# end of that relationship, which can't be removed with a call to distinct().
if self._force_distinct and contains_plural_field(self.model, fields):
object_list = self.force_distinct(object_list)
if virtual:
# Have to sort the whole queryset by hand!
object_list = list(object_list)
def flatten(value):
if isinstance(value, (list, tuple)):
return flatten(value[0])
return value
for name in virtual[::-1]: # stable sorting, top priority sort comes last
reverse = False
if name[0] in '+-':
reverse = (name[0] == '-')
name = name[1:]
column = self.columns[name]
object_list.sort(key=lambda o: flatten(column.value(o)[0]), reverse=reverse)
return object_list
def force_distinct(self, object_list):
seen = set()
def is_unseen(obj):
if obj.pk in seen:
return False
seen.add(obj.pk)
return True
return tuple(obj for obj in object_list if is_unseen(obj))
# Per-record callbacks
def preload_record_data(self, obj):
"""
An empty hook for doing something with a result ``obj`` before column lookups are called
against the object. The dict of items returned will be passed as keyword arguments to any
available column ``processor`` callbacks.
Use this to look up expensive data once per record so that it can be shared between column
processors.
By default, this method also inspects the originating view for a method of the same name,
giving it an opportunity to contribute to the preloaded data.
"""
kwargs = {}
if self.forward_callback_target and \
hasattr(self.forward_callback_target, 'preload_record_data'):
kwargs.update(self.forward_callback_target.preload_record_data(obj))
return kwargs
def get_object_pk(self, obj):
""" Returns the object's ``pk`` value. """
return obj.pk
def get_extra_record_data(self, obj):
""" Returns a dictionary of JSON-friendly data sent to the client as ``"DT_RowData"``. """
data = {}
if self.forward_callback_target and \
hasattr(self.forward_callback_target, 'get_extra_record_data'):
data.update(self.forward_callback_target.get_extra_record_data(obj))
return data
def get_record_data(self, obj):
"""
Returns a dict of column data that will be given to the view for final serialization. The
key names in this dict are not finalized at this stage, but all of the data is present.
Each column is consulted for its value (computed based on its
:py:attr:`~datatableview.columns.Column.sources` applied against the given ``obj`` instance)
and then sent to the column's :py:attr:`~datatableview.columns.Column.processor` function.
"""
preloaded_kwargs = self.preload_record_data(obj)
data = {
'pk': self.get_object_pk(obj),
'_extra_data': self.get_extra_record_data(obj),
}
for i, (name, column) in enumerate(self.columns.items()):
kwargs = dict(column.get_processor_kwargs(**preloaded_kwargs), **{
'datatable': self,
'view': self.view,
'field_name': column.name,
})
value = self.get_column_value(obj, column, **kwargs)
processor = self.get_processor_method(column, i)
if processor:
value = processor(obj, default_value=value[0], rich_value=value[1], **kwargs)
# A 2-tuple at this stage has presumably served its purpose in the processor callback,
# so we convert it to its "rich" value for display purposes.
if isinstance(value, (tuple, list)):
value = value[1]
if six.PY2 and isinstance(value, str): # not unicode
value = value.decode('utf-8')
if value is not None:
value = six.text_type(value)
data[str(i)] = value
return data
def get_column_value(self, obj, column, **kwargs):
""" Returns whatever the column derived as the source value. """
return column.value(obj, **kwargs)
def get_processor_method(self, column, i):
"""
Using a slightly mangled version of the column's name (explained below), the processor
callback for each column is located.
Each field can generate customized data by defining a method on the view called either
"get_column_FIELD_NAME_data" or "get_column_INDEX_data".
If the FIELD_NAME approach is used, the name is the raw field name (e.g., "street_name") or
else the friendly representation defined in a 2-tuple such as
("Street name", "subdivision__home__street_name"), where the name has non-alphanumeric
characters stripped to single underscores. For example, the friendly name
"Region: Subdivision Type" would convert to "Region_Subdivision_Type", requiring the method
name "get_column_Region_Subdivision_Type_data".
Alternatively, if the INDEX approach is used, a method will be fetched called
"get_column_0_data", or otherwise using the 0-based index of the column's position as
defined in the view's ``datatable_options['columns']`` setting.
Finally, if a third element is defined in the tuple, it will be treated as the function or
name of a member attribute which will be used directly.
"""
callback = column.processor
if callback:
if callable(callback):
return callback
if self.forward_callback_target:
f = getattr(self.forward_callback_target, callback, None)
else:
f = None
if f:
return f
return getattr(self, callback)
column_name = column.name
if isinstance(self, LegacyDatatable):
name = force_text(column.label, errors="ignore")
if not name:
name = column.sources[0]
column_name = re.sub(r'[\W_]+', '_', name)
if self.forward_callback_target:
f = getattr(self.forward_callback_target, 'get_column_%s_data' % (column_name,), None)
if f:
return f
f = getattr(self.forward_callback_target, 'get_column_%d_data' % (i,), None)
if f:
return f
f = getattr(self, 'get_column_%s_data' % (column_name,), None)
if f:
return f
f = getattr(self, 'get_column_%d_data' % (i,), None)
if f:
return f
return None
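# Editor's note: a sketch of the processor naming conventions described in the
# docstring above, shown on a hypothetical view acting as the
# forward_callback_target (view class name and return values are assumptions):
#
#     class MyDatatableView(DatatableView):
#         def get_column_Region_Subdivision_Type_data(self, instance, **kwargs):
#             return instance.region.subdivision_type     # matched by mangled name
#
#         def get_column_0_data(self, instance, **kwargs):
#             return instance.pk                          # matched by column index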
# Template rendering features
def __str__(self):
""" Renders ``structure_template`` with ``self`` as a context variable. """
if not hasattr(self, 'config'):
self.configure()
context = {
'url': self.url,
'config': self.config,
'datatable': self,
'columns': self.columns.values(),
}
return render_to_string(self.config['structure_template'], context)
def __iter__(self):
""" Yields each column in order. """
if not hasattr(self, 'config'):
self.configure()
for column in self.columns.values():
yield column
class ValuesDatatable(Datatable):
"""
Variant of the standard Datatable that terminates its queryset with ``.values()`` and provides
the results to any column :py:attr:`~datatableview.columns.Column.processor` callbacks for
additional modification.
Processor callbacks will no longer receive model instances, but instead the dict of selected
values.
"""
def get_valuesqueryset(self, queryset):
# Figure out the full list of ORM path names
self.value_queries = OrderedDict({'pk': 'pk'})
for name, column in self.columns.items():
self.value_queries.update(OrderedDict([
(source, name) for source in column.sources
]))
return queryset.values(*self.value_queries.keys())
def populate_records(self):
"""
Switches the original queryset to a ``ValuesQuerySet``, selecting values according to what
each column has declared in its :py:attr:`~datatableview.columns.Column.sources` list.
"""
self.object_list = self.get_valuesqueryset(self.object_list)
super(ValuesDatatable, self).populate_records()
def get_object_pk(self, obj):
"""
Correctly reads the pk from the ValuesQuerySet entry, as a dict item instead of an
attribute.
"""
return obj['pk']
def preload_record_data(self, obj):
"""
Modifies the ``obj`` values dict to alias the selected values to the column name that asked
for its selection.
For example, a datatable that declares a column ``'blog'`` which has a related lookup source
``'blog__name'`` will ensure that the selected value exists in ``obj`` at both keys
``blog__name`` and ``blog`` (the former because that was how it was selected, the latter
because that was the column name used to select it).
:Example:
``{'pk': 1, 'blog__name': "My Blog"}``
``{'pk': 1, 'blog__name': "My Blog", 'blog': "My Blog"}``
When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`, the column
name's entry in ``obj`` will be a list of each of those values.
:Example:
``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}``
``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}``
In every situation, the original selected values will always be retained in ``obj``.
"""
data = {}
for orm_path, column_name in self.value_queries.items():
value = obj[orm_path]
if column_name not in data:
data[column_name] = value
else:
if not isinstance(data[column_name], (tuple, list)):
data[column_name] = [data[column_name]]
data[column_name].append(value)
obj.update(data)
return super(ValuesDatatable, self).preload_record_data(obj)
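# Editor's note: a small hypothetical declaration illustrating the aliasing
# described above (declarative column syntax and the ``Entry`` model are
# assumptions). A column named 'blog' sourcing 'blog__name' will see the
# selected value under both keys in the values dict handed to its processor:
#
#     class BlogValuesDatatable(ValuesDatatable):
#         blog = TextColumn(label="Blog", sources=['blog__name'])
#
#         class Meta:
#             model = Entry                      # assumed example model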
class LegacyDatatable(Datatable):
"""
Modifies the :py:meth:`.resolve_virtual_columns` hook to deal with legacy-style column
declarations, rather than automatically raising them as errors like normal.
:py:class:`~datatableview.views.legacy.LegacyDatatableView` automatically uses this
as its :py:attr:`~datatableview.views.legacy.LegacyDatatableView.datatable_class`.
"""
def resolve_virtual_columns(self, *names):
"""
Assume that all ``names`` are legacy-style tuple declarations, and generate modern column
instances to match the behavior of the old syntax.
"""
from .views.legacy import get_field_definition
virtual_columns = {}
for name in names:
field = get_field_definition(name)
column = TextColumn(sources=field.fields, label=field.pretty_name,
processor=field.callback)
column.name = field.pretty_name if field.pretty_name else field.fields[0]
virtual_columns[name] = column
# Make sure it's in the same order as originally defined
new_columns = OrderedDict()
for name in self._meta.columns: # Can't use self.config yet, hasn't been generated
if self.columns.get(name):
column = self.columns[name]
else:
column = virtual_columns[name]
new_columns[column.name] = column
self.columns = new_columns
class ValuesLegacyDatatable(LegacyDatatable, ValuesDatatable):
"""
A :py:class:`LegacyDatatable` that also inherits from :py:class:`ValuesDatatable`
"""
|
|
#!/usr/bin/env python
# encoding: utf-8
'''
Created on Aug 4, 2016
@author: Yusuke Kawatsu
'''
# built-in modules.
import calendar
import datetime
# my modules.
from server.util import connect_db
class CostStore(object):
'''
return & store estimated charge.
'''
def __init__(self, aws_access_key_id):
'''
Constructor
'''
self._aws_access_key_id = aws_access_key_id
def awsDataTransfer(self):
return self._get_all(u'AWSDataTransfer')
def putAwsDataTransfer(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AWSDataTransfer', value, timestamp)
def awsQueueService(self):
return self._get_all(u'AWSQueueService')
def putAwsQueueService(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AWSQueueService', value, timestamp)
def amazonEC2(self):
return self._get_all(u'AmazonEC2')
def putAmazonEC2(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonEC2', value, timestamp)
def amazonES(self):
return self._get_all(u'AmazonES')
def putAmazonES(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonES', value, timestamp)
def amazonElastiCache(self):
return self._get_all(u'AmazonElastiCache')
def putAmazonElastiCache(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonElastiCache', value, timestamp)
def amazonRDS(self):
return self._get_all(u'AmazonRDS')
def putAmazonRDS(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonRDS', value, timestamp)
def amazonRoute53(self):
return self._get_all(u'AmazonRoute53')
def putAmazonRoute53(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonRoute53', value, timestamp)
def amazonS3(self):
return self._get_all(u'AmazonS3')
def putAmazonS3(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonS3', value, timestamp)
def amazonSNS(self):
return self._get_all(u'AmazonSNS')
def putAmazonSNS(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'AmazonSNS', value, timestamp)
def awskms(self):
return self._get_all(u'awskms')
def putAwskms(self, value, timestamp):
'''
:param value: any value.
:param datetime.datetime timestamp: timestamp.
'''
return self._put_daily_data(u'awskms', value, timestamp)
def _get_all(self, service_name):
with connect_db() as db:
cursor = db.cursor()
cursor.execute(
u'select aws_access_key_id, timestamp, value from service_costs where service_name=? and aws_access_key_id=? order by timestamp',
(service_name, self._aws_access_key_id, ))
mid = [ {
u'aws_access_key_id': row[0],
u'timestamp': row[1],
u'value': row[2]
} for row in cursor.fetchall() ]
# Convert stored unix timestamps back to datetimes; a list comprehension
# keeps the result eager on both Python 2 and 3.
ret = [dict(d, timestamp=datetime.datetime.utcfromtimestamp(d[u'timestamp'])) for d in mid]
return ret
def _put_daily_data(self, service_name, value, timestamp):
unixtime = calendar.timegm(timestamp.utctimetuple())
with connect_db() as db:
db.execute(u'insert into service_costs values (null, ?, ?, ?, ?)', (service_name, self._aws_access_key_id, unixtime, value, ))
db.commit()
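# Editor's note: a minimal usage sketch (not executed). The access key id and
# value are placeholders, and the ``service_costs`` table is assumed to exist
# behind connect_db():
#
#     store = CostStore(u'AKIAEXAMPLE')                 # placeholder key id
#     store.putAmazonEC2(12.34, datetime.datetime.utcnow())
#     rows = store.amazonEC2()   # [{u'aws_access_key_id': ..., u'timestamp': datetime, u'value': ...}, ...]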
class MonthlyCostStore(object):
'''
return costs for each month.
'''
def __init__(self, inner):
'''
Constructor
'''
assert isinstance(inner, CostStore)
self._inner = inner
def awsDataTransfer(self, *args, **kwargs):
return self._filter_monthly( self._inner.awsDataTransfer(*args, **kwargs) )
def awsQueueService(self, *args, **kwargs):
return self._filter_monthly( self._inner.awsQueueService(*args, **kwargs) )
def amazonEC2(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonEC2(*args, **kwargs) )
def amazonES(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonES(*args, **kwargs) )
def amazonElastiCache(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonElastiCache(*args, **kwargs) )
def amazonRDS(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonRDS(*args, **kwargs) )
def amazonRoute53(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonRoute53(*args, **kwargs) )
def amazonS3(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonS3(*args, **kwargs) )
def amazonSNS(self, *args, **kwargs):
return self._filter_monthly( self._inner.amazonSNS(*args, **kwargs) )
def awskms(self, *args, **kwargs):
return self._filter_monthly( self._inner.awskms(*args, **kwargs) )
def _filter_monthly(self, data):
mid = sorted(data, reverse=True, key=lambda e: e[u'timestamp'])
def _gen(seq):
prev = datetime.datetime(1970, 1, 1, 0, 0, 0)
for e in seq:
dt = e[u'timestamp']
if dt.year == prev.year and dt.month == prev.month:
continue
prev = dt
yield e
mid = list(_gen(mid))
mid.reverse()
return mid
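# Editor's note: _filter_monthly() keeps only the newest entry per calendar
# month (it walks the data newest-first and skips repeated year/month pairs,
# then restores chronological order). A hedged usage sketch with a placeholder
# access key id:
#
#     monthly = MonthlyCostStore(CostStore(u'AKIAEXAMPLE'))
#     rows = monthly.amazonS3()   # at most one entry per month, oldest first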
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Security Group action implementations"""
import argparse
from cliff import columns as cliff_columns
from osc_lib.command import command
from osc_lib import utils
from osc_lib.utils import tags as _tag
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
from openstackclient.network import utils as network_utils
def _format_network_security_group_rules(sg_rules):
# For readability, and to align with the formatting of compute security group
# rules, trim keys whose values the caller already knows (e.g. security group
# and tenant ID) or that are empty.
for sg_rule in sg_rules:
empty_keys = [k for k, v in sg_rule.items() if not v]
for key in empty_keys:
sg_rule.pop(key)
sg_rule.pop('security_group_id', None)
sg_rule.pop('project_id', None)
return utils.format_list_of_dicts(sg_rules)
def _format_compute_security_group_rule(sg_rule):
info = network_utils.transform_compute_security_group_rule(sg_rule)
# Trim parent security group ID since caller has this information.
info.pop('parent_group_id', None)
# Trim keys with empty string values.
keys_to_trim = [
'ip_protocol',
'ip_range',
'port_range',
'remote_security_group',
]
for key in keys_to_trim:
if key in info and not info[key]:
info.pop(key)
return utils.format_dict(info)
def _format_compute_security_group_rules(sg_rules):
rules = []
for sg_rule in sg_rules:
rules.append(_format_compute_security_group_rule(sg_rule))
return utils.format_list(rules, separator='\n')
class NetworkSecurityGroupRulesColumn(cliff_columns.FormattableColumn):
def human_readable(self):
return _format_network_security_group_rules(self._value)
class ComputeSecurityGroupRulesColumn(cliff_columns.FormattableColumn):
def human_readable(self):
return _format_compute_security_group_rules(self._value)
_formatters_network = {
'security_group_rules': NetworkSecurityGroupRulesColumn,
}
_formatters_compute = {
'rules': ComputeSecurityGroupRulesColumn,
}
def _get_columns(item):
# We still support Nova managed security groups, where we have tenant_id.
column_map = {
'security_group_rules': 'rules',
'tenant_id': 'project_id',
}
hidden_columns = ['location']
return utils.get_osc_show_columns_for_sdk_resource(
item,
column_map,
hidden_columns
)
# TODO(abhiraut): Use the SDK resource mapped attribute names once the
# OSC minimum requirements include SDK 1.0.
class CreateSecurityGroup(common.NetworkAndComputeShowOne,
common.NeutronCommandWithExtraArgs):
_description = _("Create a new security group")
def update_parser_common(self, parser):
parser.add_argument(
"name",
metavar="<name>",
help=_("New security group name")
)
parser.add_argument(
"--description",
metavar="<description>",
help=_("Security group description")
)
return parser
def update_parser_network(self, parser):
parser.add_argument(
'--project',
metavar='<project>',
help=self.enhance_help_neutron(_("Owner's project (name or ID)"))
)
stateful_group = parser.add_mutually_exclusive_group()
stateful_group.add_argument(
"--stateful",
action='store_true',
default=None,
help=_("Security group is stateful (Default)")
)
stateful_group.add_argument(
"--stateless",
action='store_true',
default=None,
help=_("Security group is stateless")
)
identity_common.add_project_domain_option_to_parser(
parser, enhance_help=self.enhance_help_neutron)
_tag.add_tag_option_to_parser_for_create(
parser, _('security group'),
enhance_help=self.enhance_help_neutron)
return parser
def _get_description(self, parsed_args):
if parsed_args.description is not None:
return parsed_args.description
else:
return parsed_args.name
def take_action_network(self, client, parsed_args):
# Build the create attributes.
attrs = {}
attrs['name'] = parsed_args.name
attrs['description'] = self._get_description(parsed_args)
if parsed_args.stateful:
attrs['stateful'] = True
if parsed_args.stateless:
attrs['stateful'] = False
if parsed_args.project is not None:
identity_client = self.app.client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
attrs['project_id'] = project_id
attrs.update(
self._parse_extra_properties(parsed_args.extra_properties))
# Create the security group and display the results.
obj = client.create_security_group(**attrs)
# tags cannot be set when created, so tags need to be set later.
_tag.update_tags_for_set(client, obj, parsed_args)
display_columns, property_columns = _get_columns(obj)
data = utils.get_item_properties(
obj,
property_columns,
formatters=_formatters_network
)
return (display_columns, data)
def take_action_compute(self, client, parsed_args):
description = self._get_description(parsed_args)
obj = client.api.security_group_create(
parsed_args.name,
description,
)
display_columns, property_columns = _get_columns(obj)
data = utils.get_dict_properties(
obj,
property_columns,
formatters=_formatters_compute
)
return (display_columns, data)
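# Editor's note: an assumed command-line invocation of this class through the
# standard OSC "security group create" command (project and group names below
# are placeholders; the CLI wiring itself lives outside this module):
#
#     openstack security group create --description "web servers" \
#         --stateful --project demo web-sg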
class DeleteSecurityGroup(common.NetworkAndComputeDelete):
_description = _("Delete security group(s)")
# Used by base class to find resources in parsed_args.
resource = 'group'
r = None
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
nargs="+",
help=_("Security group(s) to delete (name or ID)"),
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(self.r, ignore_missing=False)
client.delete_security_group(obj)
def take_action_compute(self, client, parsed_args):
client.api.security_group_delete(self.r)
# TODO(rauta): Use the SDK resource mapped attribute names once
# the OSC minimum requirements include SDK 1.0.
class ListSecurityGroup(common.NetworkAndComputeLister):
_description = _("List security groups")
FIELDS_TO_RETRIEVE = ['id', 'name', 'description', 'project_id', 'tags']
def update_parser_network(self, parser):
if not self.is_docs_build:
# Maintain and hide the argument for backwards compatibility.
# Network will always return all projects for an admin.
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=argparse.SUPPRESS,
)
parser.add_argument(
'--project',
metavar='<project>',
help=self.enhance_help_neutron(
_("List security groups according to the project (name or "
"ID)"))
)
identity_common.add_project_domain_option_to_parser(
parser, enhance_help=self.enhance_help_neutron)
_tag.add_tag_filtering_option_to_parser(
parser, _('security group'),
enhance_help=self.enhance_help_neutron)
return parser
def update_parser_compute(self, parser):
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=self.enhance_help_nova_network(
_("Display information from all projects (admin only)"))
)
return parser
def take_action_network(self, client, parsed_args):
filters = {}
if parsed_args.project:
identity_client = self.app.client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain,
).id
filters['project_id'] = project_id
_tag.get_tag_filtering_args(parsed_args, filters)
data = client.security_groups(fields=self.FIELDS_TO_RETRIEVE,
**filters)
columns = (
"ID",
"Name",
"Description",
"Project ID",
"tags"
)
column_headers = (
"ID",
"Name",
"Description",
"Project",
"Tags"
)
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
def take_action_compute(self, client, parsed_args):
search = {'all_tenants': parsed_args.all_projects}
data = client.api.security_group_list(
# TODO(dtroyer): add limit, marker
search_opts=search,
)
columns = (
"ID",
"Name",
"Description",
)
column_headers = columns
if parsed_args.all_projects:
columns = columns + ('Tenant ID',)
column_headers = column_headers + ('Project',)
return (column_headers,
(utils.get_dict_properties(
s, columns,
) for s in data))
class SetSecurityGroup(common.NetworkAndComputeCommand,
common.NeutronCommandWithExtraArgs):
_description = _("Set security group properties")
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
help=_("Security group to modify (name or ID)")
)
parser.add_argument(
'--name',
metavar='<new-name>',
help=_("New security group name")
)
parser.add_argument(
"--description",
metavar="<description>",
help=_("New security group description")
)
stateful_group = parser.add_mutually_exclusive_group()
stateful_group.add_argument(
"--stateful",
action='store_true',
default=None,
help=_("Security group is stateful (Default)")
)
stateful_group.add_argument(
"--stateless",
action='store_true',
default=None,
help=_("Security group is stateless")
)
return parser
def update_parser_network(self, parser):
_tag.add_tag_option_to_parser_for_set(
parser, _('security group'),
enhance_help=self.enhance_help_neutron)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(parsed_args.group,
ignore_missing=False)
attrs = {}
if parsed_args.name is not None:
attrs['name'] = parsed_args.name
if parsed_args.description is not None:
attrs['description'] = parsed_args.description
if parsed_args.stateful:
attrs['stateful'] = True
if parsed_args.stateless:
attrs['stateful'] = False
attrs.update(
self._parse_extra_properties(parsed_args.extra_properties))
# NOTE(rtheis): Previous behavior did not raise a CommandError
# if there were no updates. Maintain this behavior and issue
# the update.
client.update_security_group(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_set(client, obj, parsed_args)
def take_action_compute(self, client, parsed_args):
data = client.api.security_group_find(parsed_args.group)
if parsed_args.name is not None:
data['name'] = parsed_args.name
if parsed_args.description is not None:
data['description'] = parsed_args.description
# NOTE(rtheis): Previous behavior did not raise a CommandError
# if there were no updates. Maintain this behavior and issue
# the update.
client.api.security_group_set(
data,
data['name'],
data['description'],
)
class ShowSecurityGroup(common.NetworkAndComputeShowOne):
_description = _("Display security group details")
def update_parser_common(self, parser):
parser.add_argument(
'group',
metavar='<group>',
help=_("Security group to display (name or ID)")
)
return parser
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(parsed_args.group,
ignore_missing=False)
display_columns, property_columns = _get_columns(obj)
data = utils.get_item_properties(
obj,
property_columns,
formatters=_formatters_network
)
return (display_columns, data)
def take_action_compute(self, client, parsed_args):
obj = client.api.security_group_find(parsed_args.group)
display_columns, property_columns = _get_columns(obj)
data = utils.get_dict_properties(
obj,
property_columns,
formatters=_formatters_compute
)
return (display_columns, data)
class UnsetSecurityGroup(command.Command):
_description = _("Unset security group properties")
def get_parser(self, prog_name):
parser = super(UnsetSecurityGroup, self).get_parser(prog_name)
parser.add_argument(
'group',
metavar="<group>",
help=_("Security group to modify (name or ID)")
)
_tag.add_tag_option_to_parser_for_unset(parser, _('security group'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.network
obj = client.find_security_group(parsed_args.group,
ignore_missing=False)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_unset(client, obj, parsed_args)
|
|
#!/usr/bin/env python
"""Distutils based setup script for SymPy.
This uses Distutils (http://python.org/sigs/distutils-sig/), the standard
Python mechanism for installing packages. For the easiest installation
just type the command (you'll probably need root privileges for that):
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of:
python setup.py --help install
In addition, there are some other commands:
python setup.py clean -> will clean all trash (*.pyc and stuff)
python setup.py test -> will run the complete test suite
python setup.py bench -> will run the complete benchmark suite
python setup.py audit -> will run pyflakes checker on source code
To get a full list of available commands, read the output of:
python setup.py --help-commands
Or, if all else fails, feel free to write to the sympy list at
[email protected] and ask for help.
"""
from distutils.core import setup
from distutils.core import Command
from distutils.command.build_scripts import build_scripts
import sys
import subprocess
import os
import sympy
PY3 = sys.version_info[0] > 2
# Make sure I have the right Python version.
if sys.version_info[:2] < (2, 6):
print("SymPy requires Python 2.6 or newer. Python %d.%d detected" % sys.version_info[:2])
sys.exit(-1)
# Check that this list is up to date against the result of the command:
# for i in `find sympy -name __init__.py | rev | cut -f 2- -d '/' | rev | egrep -v "^sympy$" | egrep -v "tests$" `; do echo "'${i//\//.}',"; done | sort
modules = [
'sympy.assumptions',
'sympy.assumptions.handlers',
'sympy.categories',
'sympy.combinatorics',
'sympy.concrete',
'sympy.core',
'sympy.crypto',
'sympy.diffgeom',
'sympy.external',
'sympy.functions',
'sympy.functions.combinatorial',
'sympy.functions.elementary',
'sympy.functions.special',
'sympy.galgebra',
'sympy.geometry',
'sympy.integrals',
'sympy.interactive',
'sympy.liealgebras',
'sympy.logic',
'sympy.logic.algorithms',
'sympy.logic.utilities',
'sympy.matrices',
'sympy.matrices.expressions',
'sympy.mpmath',
'sympy.mpmath.calculus',
'sympy.mpmath.functions',
'sympy.mpmath.libmp',
'sympy.mpmath.matrices',
'sympy.ntheory',
'sympy.parsing',
'sympy.physics',
'sympy.physics.hep',
'sympy.physics.mechanics',
'sympy.physics.quantum',
'sympy.plotting',
'sympy.plotting.intervalmath',
'sympy.plotting.pygletplot',
'sympy.polys',
'sympy.polys.agca',
'sympy.polys.domains',
'sympy.printing',
'sympy.printing.pretty',
'sympy.series',
'sympy.sets',
'sympy.simplify',
'sympy.solvers',
'sympy.statistics',
'sympy.stats',
'sympy.strategies',
'sympy.strategies.branch',
'sympy.tensor',
'sympy.unify',
'sympy.utilities',
'sympy.utilities.mathml',
]
class audit(Command):
"""Audits SymPy's source code for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit SymPy source with PyFlakes"
user_options = []
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
try:
import pyflakes.scripts.pyflakes as flakes
except ImportError:
print("In order to run the audit, you need to have PyFlakes installed.")
sys.exit(-1)
# We don't want to audit external dependencies
ext = ('mpmath',)
dirs = (os.path.join(*d) for d in
(m.split('.') for m in modules) if d[1] not in ext)
warns = 0
for dir in dirs:
for filename in os.listdir(dir):
if filename.endswith('.py') and filename != '__init__.py':
warns += flakes.checkPath(os.path.join(dir, filename))
if warns > 0:
print("Audit finished with total %d warnings" % warns)
class clean(Command):
"""Cleans *.pyc and debian trashs, so you should get the same copy as
is in the VCS.
"""
description = "remove build files"
user_options = [("all", "a", "the same")]
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
os.system("py.cleanup")
os.system("rm -f python-build-stamp-2.4")
os.system("rm -f MANIFEST")
os.system("rm -rf build")
os.system("rm -rf dist")
os.system("rm -rf doc/_build")
class test_sympy(Command):
"""Runs all tests under the sympy/ folder
"""
description = "run all tests and doctests; also see bin/test and bin/doctest"
user_options = [] # distutils complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # distutils wants this
pass
def finalize_options(self): # this too
pass
def run(self):
sympy.utilities.runtests.run_all_tests()
class run_benchmarks(Command):
"""Runs all SymPy benchmarks"""
description = "run all benchmarks"
user_options = [] # distutils complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # distutils wants this
pass
def finalize_options(self): # this too
pass
# we use py.test like architecture:
#
# o collector -- collects benchmarks
# o runner -- executes benchmarks
# o presenter -- displays benchmarks results
#
# this is done in sympy.utilities.benchmarking on top of py.test
def run(self):
from sympy.utilities import benchmarking
benchmarking.main(['sympy'])
cmdclass = {'test': test_sympy,
'bench': run_benchmarks,
'clean': clean,
'audit': audit}
# Check that this list is up to date against the result of the command:
# $ python bin/generate_test_list.py
tests = [
'sympy.assumptions.tests',
'sympy.categories.tests',
'sympy.combinatorics.tests',
'sympy.concrete.tests',
'sympy.core.tests',
'sympy.crypto.tests',
'sympy.diffgeom.tests',
'sympy.external.tests',
'sympy.functions.combinatorial.tests',
'sympy.functions.elementary.tests',
'sympy.functions.special.tests',
'sympy.galgebra.tests',
'sympy.geometry.tests',
'sympy.integrals.tests',
'sympy.interactive.tests',
'sympy.liealgebras.tests',
'sympy.logic.tests',
'sympy.matrices.expressions.tests',
'sympy.matrices.tests',
'sympy.mpmath.tests',
'sympy.ntheory.tests',
'sympy.parsing.tests',
'sympy.physics.mechanics.tests',
'sympy.physics.quantum.tests',
'sympy.physics.tests',
'sympy.plotting.intervalmath.tests',
'sympy.plotting.pygletplot.tests',
'sympy.plotting.tests',
'sympy.polys.agca.tests',
'sympy.polys.domains.tests',
'sympy.polys.tests',
'sympy.printing.pretty.tests',
'sympy.printing.tests',
'sympy.series.tests',
'sympy.sets.tests',
'sympy.simplify.tests',
'sympy.solvers.tests',
'sympy.statistics.tests',
'sympy.stats.tests',
'sympy.strategies.branch.tests',
'sympy.strategies.tests',
'sympy.tensor.tests',
'sympy.unify.tests',
'sympy.utilities.tests',
]
classifiers = [
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
]
long_description = '''SymPy is a Python library for symbolic mathematics. It aims
to become a full-featured computer algebra system (CAS) while keeping the code
as simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external libraries.'''
setup_args = {
"name": 'sympy',
"version": sympy.__version__,
"description": 'Computer algebra system (CAS) in Python',
"long_description": long_description,
"author": 'SymPy development team',
"author_email": '[email protected]',
"license": 'BSD',
"keywords": "Math CAS",
"url": 'http://code.google.com/p/sympy',
"packages": ['sympy'] + modules + tests,
"scripts": ['bin/isympy'],
"ext_modules": [],
"package_data": { 'sympy.utilities.mathml': ['data/*.xsl'] },
"data_files": [('share/man/man1', ['doc/man/isympy.1'])],
"cmdclass": cmdclass,
"classifiers": classifiers,
}
setup(**setup_args)
|
|
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Ground Segment Simulation GUI *
#******************************************************************************
import tkinter
from tkinter import filedialog
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import GRND.IF
import SCOS.ENV
import UI.TKI
#############
# constants #
#############
COLOR_BUTTON_FG = "#FFFFFF"
COLOR_BUTTON_BG = "#808080"
COLOR_INITIALISED = "#FFFF00"
COLOR_CONNECTED = "#00FF00"
COLOR_ON_OK = "#00FF00"
COLOR_ON_NOK = "#FF0000"
###########
# classes #
###########
# =============================================================================
class GUIview(UI.TKI.GUItabView):
"""Implementation of the SIM Ground GUI layer"""
# ---------------------------------------------------------------------------
def __init__(self, master):
"""Initialise all GUI elements"""
UI.TKI.GUItabView.__init__(self, master, "GRND", "Ground Segment")
# menu buttons
self.menuButtons = UI.TKI.MenuButtons(self,
[["AD-I", self.initialiseADcallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG],
["REC+", self.recordFramesCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG],
["REC-", self.stopFrameRecorderCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED]])
self.appGrid(self.menuButtons,
row=0,
columnspan=2,
rowweight=0,
sticky=tkinter.EW)
# checkbuttons
self.checkButtons = UI.TKI.Checkbuttons(self,
[["ACK1", self.ack1Callback, True, COLOR_ON_OK],
["NAK1", self.nak1Callback, False, COLOR_ON_NOK],
["ACK2", self.ack2Callback, True, COLOR_ON_OK],
["NAK2", self.nak2Callback, False, COLOR_ON_NOK]])
self.appGrid(self.checkButtons,
row=1,
columnspan=2,
rowweight=0,
columnweight=0,
sticky=tkinter.W)
# tm status
self.tmStatusField = UI.TKI.ValueField(self, row=2, label="NCTRS TM status:")
self.tmStatusField.set("INIT")
self.tmStatusField.setBackground(COLOR_INITIALISED)
# tm port
self.tmPortField = UI.TKI.ValueField(self, row=3, label="NCTRS TM port:")
self.tmPortField.set(GRND.IF.s_configuration.nctrsTMport)
# tc status
self.tcStatusField = UI.TKI.ValueField(self, row=4, label="NCTRS TC status:")
self.tcStatusField.set("INIT")
self.tcStatusField.setBackground(COLOR_INITIALISED)
# tc port
self.tcPortField = UI.TKI.ValueField(self, row=5, label="NCTRS TC port:")
self.tcPortField.set(GRND.IF.s_configuration.nctrsTCport)
# admin status
self.adminStatusField = UI.TKI.ValueField(self, row=6, label="NCTRS admin message status:")
self.adminStatusField.set("INIT")
self.adminStatusField.setBackground(COLOR_INITIALISED)
# admin port
self.adminPortField = UI.TKI.ValueField(self, row=7, label="NCTRS admin message port:")
self.adminPortField.set(GRND.IF.s_configuration.nctrsAdminPort)
# log messages (default logger)
self.messageLogger = UI.TKI.MessageLogger(self)
self.appGrid(self.messageLogger, row=8, columnspan=2)
# message line
self.messageline = tkinter.Message(self, relief=tkinter.GROOVE)
self.appGrid(self.messageline,
row=9,
columnspan=2,
rowweight=0,
columnweight=0,
sticky=tkinter.EW)
self.grid(row=0, column=0, sticky=tkinter.EW+tkinter.NS)
self.master.rowconfigure(0, weight=1)
self.master.columnconfigure(0, weight=1)
# ---------------------------------------------------------------------------
def fillCommandMenuItems(self):
"""
fill the command menu bar,
implementation of UI.TKI.GUItabView.fillCommandMenuItems
"""
self.addCommandMenuItem(label="InitialiseAD", command=self.initialiseADcallback)
self.addCommandMenuItem(label="GRNDenableAck1", command=self.grndEnableAck1Callback, enabled=False)
self.addCommandMenuItem(label="GRNDenableNak1", command=self.grndEnableNak1Callback)
self.addCommandMenuItem(label="GRNDdisableAck1", command=self.grndDisableAck1Callback)
self.addCommandMenuItem(label="GRNDenableAck2", command=self.grndEnableAck2Callback, enabled=False)
self.addCommandMenuItem(label="GRNDenableNak2", command=self.grndEnableNak2Callback)
self.addCommandMenuItem(label="GRNDdisableAck2", command=self.grndDisableAck2Callback)
self.addCommandMenuItem(label="RecordFrames", command=self.recordFramesCallback)
self.addCommandMenuItem(label="StopFrameRecorder", command=self.stopFrameRecorderCallback, enabled=False)
# ---------------------------------------------------------------------------
def initialiseADcallback(self):
"""Called when the InitialiseAD menu entry is selected"""
self.notifyModelTask(["INITIALISEAD"])
# ---------------------------------------------------------------------------
def grndEnableAck1Callback(self):
"""Called when the GRNDenableAck1 menu entry is selected"""
self.notifyModelTask(["GRNDENABLEACK1"])
def grndEnableNak1Callback(self):
"""Called when the GRNDenableNak1 menu entry is selected"""
self.notifyModelTask(["GRNDENABLENAK1"])
def grndDisableAck1Callback(self):
"""Called when the GRNDdisableAck1 menu entry is selected"""
self.notifyModelTask(["GRNDDISABLEACK1"])
def ack1Callback(self):
"""Called when the ACK1 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK1"):
self.notifyModelTask(["GRNDENABLEACK1"])
else:
self.notifyModelTask(["GRNDDISABLEACK1"])
def nak1Callback(self):
"""Called when the NAK1 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK1"):
self.notifyModelTask(["GRNDENABLENAK1"])
else:
self.notifyModelTask(["GRNDDISABLEACK1"])
# ---------------------------------------------------------------------------
def grndEnableAck2Callback(self):
"""Called when the GRNDenableAck2 menu entry is selected"""
self.notifyModelTask(["GRNDENABLEACK2"])
def grndEnableNak2Callback(self):
"""Called when the GRNDenableNak2 menu entry is selected"""
self.notifyModelTask(["GRNDENABLENAK2"])
def grndDisableAck2Callback(self):
"""Called when the GRNDdisableAck2 menu entry is selected"""
self.notifyModelTask(["GRNDDISABLEACK2"])
def ack2Callback(self):
"""Called when the ACK2 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("ACK2"):
self.notifyModelTask(["GRNDENABLEACK2"])
else:
self.notifyModelTask(["GRNDDISABLEACK2"])
def nak2Callback(self):
"""Called when the NAK2 checkbutton is pressed"""
if self.checkButtons.getButtonPressed("NAK2"):
self.notifyModelTask(["GRNDENABLENAK2"])
else:
self.notifyModelTask(["GRNDDISABLEACK2"])
# ---------------------------------------------------------------------------
def recordFramesCallback(self):
"""Called when the RecordFrames menu entry is selected"""
fileName = filedialog.asksaveasfilename(title="Create TM Frame Record File",
initialdir=SCOS.ENV.s_environment.tmFilesDir())
if fileName != "" and fileName != ():
self.notifyModelTask(["RECORDFRAMES", fileName])
# ---------------------------------------------------------------------------
def stopFrameRecorderCallback(self):
"""Called when the StopFrameRecorder menu entry is selected"""
self.notifyModelTask(["STOPFRAMERECORDER"])
# ---------------------------------------------------------------------------
def notifyStatus(self, status):
"""Generic callback when something changes in the model"""
if status == "TM_CONNECTED":
self.tmConnectedNotify()
elif status == "TM_DISCONNECTED":
self.tmDisconnectedNotify()
elif status == "TC_CONNECTED":
self.tcConnectedNotify()
elif status == "TC_DISCONNECTED":
self.tcDisconnectedNotify()
elif status == "ADMIN_CONNECTED":
self.adminConnectedNotify()
elif status == "ADMIN_DISCONNECTED":
self.adminDisconnectedNotify()
elif status == "GRND_ENABLED_ACK1":
self.grndEnabledAck1Notify()
elif status == "GRND_ENABLED_NAK1":
self.grndEnabledNak1Notify()
elif status == "GRND_DISABLED_ACK1":
self.grndDisabledAck1Notify()
elif status == "GRND_ENABLED_ACK2":
self.grndEnabledAck2Notify()
elif status == "GRND_ENABLED_NAK2":
self.grndEnabledNak2Notify()
elif status == "GRND_DISABLED_ACK2":
self.grndDisabledAck2Notify()
elif status == "FRAME_REC_STARTED":
self.frameRecStarted()
elif status == "FRAME_REC_STOPPED":
self.frameRecStopped()
# ---------------------------------------------------------------------------
def tmConnectedNotify(self):
"""Called when the TM connect function is successfully processed"""
self.tmStatusField.set("CONNECTED")
self.tmStatusField.setBackground(COLOR_CONNECTED)
# ---------------------------------------------------------------------------
def tmDisconnectedNotify(self):
"""Called when the TM disconnect function is successfully processed"""
self.tmStatusField.set("DISCONNECTED")
self.tmStatusField.setBackground(COLOR_INITIALISED)
# ---------------------------------------------------------------------------
def tcConnectedNotify(self):
"""Called when the TC connect function is successfully processed"""
self.tcStatusField.set("CONNECTED")
self.tcStatusField.setBackground(COLOR_CONNECTED)
# ---------------------------------------------------------------------------
def tcDisconnectedNotify(self):
"""Called when the TC disconnect function is successfully processed"""
self.tcStatusField.set("DISCONNECTED")
self.tcStatusField.setBackground(COLOR_INITIALISED)
# ---------------------------------------------------------------------------
def adminConnectedNotify(self):
"""Called when the admin connect function is successfully processed"""
self.adminStatusField.set("CONNECTED")
self.adminStatusField.setBackground(COLOR_CONNECTED)
# ---------------------------------------------------------------------------
def adminDisconnectedNotify(self):
"""Called when the admin disconnect function is successfully processed"""
self.adminStatusField.set("DISCONNECTED")
self.adminStatusField.setBackground(COLOR_INITIALISED)
# ---------------------------------------------------------------------------
def grndEnabledAck1Notify(self):
"""Called when the grndEnabledAck1 function is successfully processed"""
self.disableCommandMenuItem("GRNDenableAck1")
self.enableCommandMenuItem("GRNDenableNak1")
self.enableCommandMenuItem("GRNDdisableAck1")
self.checkButtons.setButtonPressed("ACK1", True)
self.checkButtons.setButtonPressed("NAK1", False)
def grndEnabledNak1Notify(self):
"""Called when the grndEnabledNak1 function is successfully processed"""
self.enableCommandMenuItem("GRNDenableAck1")
self.disableCommandMenuItem("GRNDenableNak1")
self.enableCommandMenuItem("GRNDdisableAck1")
self.checkButtons.setButtonPressed("ACK1", False)
self.checkButtons.setButtonPressed("NAK1", True)
def grndDisabledAck1Notify(self):
"""Called when the grndDisabledAck1 function is successfully processed"""
self.enableCommandMenuItem("GRNDenableAck1")
self.enableCommandMenuItem("GRNDenableNak1")
self.disableCommandMenuItem("GRNDdisableAck1")
self.checkButtons.setButtonPressed("ACK1", False)
self.checkButtons.setButtonPressed("NAK1", False)
# ---------------------------------------------------------------------------
def grndEnabledAck2Notify(self):
"""Called when the grndEnabledAck2 function is successfully processed"""
self.disableCommandMenuItem("GRNDenableAck2")
self.enableCommandMenuItem("GRNDenableNak1")
self.enableCommandMenuItem("GRNDdisableAck2")
self.checkButtons.setButtonPressed("ACK2", True)
self.checkButtons.setButtonPressed("NAK2", False)
def grndEnabledNak2Notify(self):
"""Called when the grndEnabledNak2 function is successfully processed"""
self.enableCommandMenuItem("GRNDenableAck2")
self.disableCommandMenuItem("GRNDenableNak2")
self.enableCommandMenuItem("GRNDdisableAck2")
self.checkButtons.setButtonPressed("ACK2", False)
self.checkButtons.setButtonPressed("NAK2", True)
def grndDisabledAck2Notify(self):
"""Called when the grndDisabledAck2 function is successfully processed"""
self.enableCommandMenuItem("GRNDenableAck2")
self.enableCommandMenuItem("GRNDenableNak2")
self.disableCommandMenuItem("GRNDdisableAck2")
self.checkButtons.setButtonPressed("ACK2", False)
self.checkButtons.setButtonPressed("NAK2", False)
# ---------------------------------------------------------------------------
def frameRecStarted(self):
"""Called when the recordFrames function is successfully processed"""
self.disableCommandMenuItem("RecordFrames")
self.enableCommandMenuItem("StopFrameRecorder")
self.menuButtons.setState("REC+", tkinter.DISABLED)
self.menuButtons.setState("REC-", tkinter.NORMAL)
# ---------------------------------------------------------------------------
def frameRecStopped(self):
"""Called when the stopFrameRecorder function is successfully processed"""
self.enableCommandMenuItem("RecordFrames")
self.disableCommandMenuItem("StopFrameRecorder")
self.menuButtons.setState("REC+", tkinter.NORMAL)
self.menuButtons.setState("REC-", tkinter.DISABLED)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.talent_v4beta1.types import common
from google.cloud.talent_v4beta1.types import completion_service
from .transports.base import CompletionTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CompletionGrpcTransport
from .transports.grpc_asyncio import CompletionGrpcAsyncIOTransport
class CompletionClientMeta(type):
"""Metaclass for the Completion client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[CompletionTransport]]
_transport_registry["grpc"] = CompletionGrpcTransport
_transport_registry["grpc_asyncio"] = CompletionGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[CompletionTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CompletionClient(metaclass=CompletionClientMeta):
"""A service handles auto completion."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "jobs.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CompletionClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CompletionClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
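    # A minimal usage sketch (the key-file path below is hypothetical):
    #
    #     client = CompletionClient.from_service_account_file(
    #         "/path/to/service-account.json")
    #
    # from_service_account_json is simply an alias for from_service_account_file,
    # so both accept a path to a service-account JSON key file.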
@property
def transport(self) -> CompletionTransport:
"""Returns the transport used by the client instance.
Returns:
CompletionTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def company_path(project: str, tenant: str, company: str,) -> str:
"""Returns a fully-qualified company string."""
return "projects/{project}/tenants/{tenant}/companies/{company}".format(
project=project, tenant=tenant, company=company,
)
@staticmethod
def parse_company_path(path: str) -> Dict[str, str]:
"""Parses a company path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/companies/(?P<company>.+?)$",
path,
)
return m.groupdict() if m else {}
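    # Illustrative round trip of the two helpers above (all identifiers are
    # placeholder values):
    #
    #     path = CompletionClient.company_path("my-project", "my-tenant", "my-company")
    #     # -> "projects/my-project/tenants/my-tenant/companies/my-company"
    #     CompletionClient.parse_company_path(path)
    #     # -> {"project": "my-project", "tenant": "my-tenant", "company": "my-company"}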
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
        otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
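    # Sketch of how the resolution above plays out (environment values are shown
    # only for illustration):
    #   GOOGLE_API_USE_CLIENT_CERTIFICATE unset or "false" -> client_cert_source is None.
    #   client_options.api_endpoint set                    -> that endpoint wins outright.
    #   GOOGLE_API_USE_MTLS_ENDPOINT == "always"           -> DEFAULT_MTLS_ENDPOINT.
    #   "auto" with a client certificate available         -> DEFAULT_MTLS_ENDPOINT.
    #   otherwise                                          -> DEFAULT_ENDPOINT.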
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CompletionTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the completion client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, CompletionTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CompletionTransport):
# transport is a CompletionTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def complete_query(
self,
request: Union[completion_service.CompleteQueryRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> completion_service.CompleteQueryResponse:
r"""Completes the specified prefix with keyword
suggestions. Intended for use by a job search
auto-complete search box.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_complete_query():
# Create a client
client = talent_v4beta1.CompletionClient()
# Initialize request argument(s)
request = talent_v4beta1.CompleteQueryRequest(
parent="parent_value",
query="query_value",
page_size=951,
)
# Make the request
response = client.complete_query(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.talent_v4beta1.types.CompleteQueryRequest, dict]):
The request object. Auto-complete parameters.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.CompleteQueryResponse:
Response of auto-complete query.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a completion_service.CompleteQueryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, completion_service.CompleteQueryRequest):
request = completion_service.CompleteQueryRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.complete_query]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CompletionClient",)
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pymongo
try:
import mongomock
except ImportError:
mongomock = None
from airflow.contrib.hooks.mongo_hook import MongoHook
from airflow.models import Connection
from airflow.utils import db
class MongoHookTest(MongoHook):
"""
    Extending hook so that a mongomock collection object can be passed in
to get_collection()
"""
def __init__(self, conn_id='mongo_default', *args, **kwargs):
super(MongoHookTest, self).__init__(conn_id=conn_id, *args, **kwargs)
def get_collection(self, mock_collection, mongo_db=None):
return mock_collection
class TestMongoHook(unittest.TestCase):
def setUp(self):
self.hook = MongoHookTest(conn_id='mongo_default', mongo_db='default')
self.conn = self.hook.get_conn()
db.merge_conn(
Connection(
conn_id='mongo_default_with_srv', conn_type='mongo',
host='mongo', port='27017', extra='{"srv": true}'))
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_get_conn(self):
self.assertEqual(self.hook.connection.port, 27017)
self.assertIsInstance(self.conn, pymongo.MongoClient)
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_srv(self):
hook = MongoHook(conn_id='mongo_default_with_srv')
self.assertTrue(hook.uri.startswith('mongodb+srv://'))
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_insert_one(self):
collection = mongomock.MongoClient().db.collection
obj = {'test_insert_one': 'test_value'}
self.hook.insert_one(collection, obj)
result_obj = collection.find_one(filter=obj)
self.assertEqual(obj, result_obj)
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_insert_many(self):
collection = mongomock.MongoClient().db.collection
objs = [
{'test_insert_many_1': 'test_value'},
{'test_insert_many_2': 'test_value'}
]
self.hook.insert_many(collection, objs)
result_objs = collection.find()
result_objs = [result for result in result_objs]
self.assertEqual(len(result_objs), 2)
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_update_one(self):
collection = mongomock.MongoClient().db.collection
obj = {'_id': '1', 'field': 0}
collection.insert_one(obj)
filter_doc = obj
update_doc = {'$inc': {'field': 123}}
self.hook.update_one(collection, filter_doc, update_doc)
result_obj = collection.find_one(filter='1')
self.assertEqual(123, result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_update_one_with_upsert(self):
collection = mongomock.MongoClient().db.collection
filter_doc = {'_id': '1', 'field': 0}
update_doc = {'$inc': {'field': 123}}
self.hook.update_one(collection, filter_doc, update_doc, upsert=True)
result_obj = collection.find_one(filter='1')
self.assertEqual(123, result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_update_many(self):
collection = mongomock.MongoClient().db.collection
obj1 = {'_id': '1', 'field': 0}
obj2 = {'_id': '2', 'field': 0}
collection.insert_many([obj1, obj2])
filter_doc = {'field': 0}
update_doc = {'$inc': {'field': 123}}
self.hook.update_many(collection, filter_doc, update_doc)
result_obj = collection.find_one(filter='1')
self.assertEqual(123, result_obj['field'])
result_obj = collection.find_one(filter='2')
self.assertEqual(123, result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_update_many_with_upsert(self):
collection = mongomock.MongoClient().db.collection
filter_doc = {'_id': '1', 'field': 0}
update_doc = {'$inc': {'field': 123}}
self.hook.update_many(collection, filter_doc, update_doc, upsert=True)
result_obj = collection.find_one(filter='1')
self.assertEqual(123, result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_replace_one(self):
collection = mongomock.MongoClient().db.collection
obj1 = {'_id': '1', 'field': 'test_value_1'}
obj2 = {'_id': '2', 'field': 'test_value_2'}
collection.insert_many([obj1, obj2])
obj1['field'] = 'test_value_1_updated'
self.hook.replace_one(collection, obj1)
result_obj = collection.find_one(filter='1')
self.assertEqual('test_value_1_updated', result_obj['field'])
# Other document should stay intact
result_obj = collection.find_one(filter='2')
self.assertEqual('test_value_2', result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_replace_one_with_filter(self):
collection = mongomock.MongoClient().db.collection
obj1 = {'_id': '1', 'field': 'test_value_1'}
obj2 = {'_id': '2', 'field': 'test_value_2'}
collection.insert_many([obj1, obj2])
obj1['field'] = 'test_value_1_updated'
self.hook.replace_one(collection, obj1, {'field': 'test_value_1'})
result_obj = collection.find_one(filter='1')
self.assertEqual('test_value_1_updated', result_obj['field'])
# Other document should stay intact
result_obj = collection.find_one(filter='2')
self.assertEqual('test_value_2', result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_replace_one_with_upsert(self):
collection = mongomock.MongoClient().db.collection
obj = {'_id': '1', 'field': 'test_value_1'}
self.hook.replace_one(collection, obj, upsert=True)
result_obj = collection.find_one(filter='1')
self.assertEqual('test_value_1', result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_replace_many(self):
collection = mongomock.MongoClient().db.collection
obj1 = {'_id': '1', 'field': 'test_value_1'}
obj2 = {'_id': '2', 'field': 'test_value_2'}
collection.insert_many([obj1, obj2])
obj1['field'] = 'test_value_1_updated'
obj2['field'] = 'test_value_2_updated'
self.hook.replace_many(collection, [obj1, obj2])
result_obj = collection.find_one(filter='1')
self.assertEqual('test_value_1_updated', result_obj['field'])
result_obj = collection.find_one(filter='2')
self.assertEqual('test_value_2_updated', result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_replace_many_with_upsert(self):
collection = mongomock.MongoClient().db.collection
obj1 = {'_id': '1', 'field': 'test_value_1'}
obj2 = {'_id': '2', 'field': 'test_value_2'}
self.hook.replace_many(collection, [obj1, obj2], upsert=True)
result_obj = collection.find_one(filter='1')
self.assertEqual('test_value_1', result_obj['field'])
result_obj = collection.find_one(filter='2')
self.assertEqual('test_value_2', result_obj['field'])
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_delete_one(self):
collection = mongomock.MongoClient().db.collection
obj = {'_id': '1'}
collection.insert_one(obj)
self.hook.delete_one(collection, {'_id': '1'})
self.assertEqual(0, collection.count())
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_delete_many(self):
collection = mongomock.MongoClient().db.collection
obj1 = {'_id': '1', 'field': 'value'}
obj2 = {'_id': '2', 'field': 'value'}
collection.insert_many([obj1, obj2])
self.hook.delete_many(collection, {'field': 'value'})
self.assertEqual(0, collection.count())
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_find_one(self):
collection = mongomock.MongoClient().db.collection
obj = {'test_find_one': 'test_value'}
collection.insert(obj)
result_obj = self.hook.find(collection, {}, find_one=True)
result_obj = {result: result_obj[result] for result in result_obj}
self.assertEqual(obj, result_obj)
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_find_many(self):
collection = mongomock.MongoClient().db.collection
objs = [{'test_find_many_1': 'test_value'}, {'test_find_many_2': 'test_value'}]
collection.insert(objs)
result_objs = self.hook.find(collection, {}, find_one=False)
result_objs = [result for result in result_objs]
self.assertGreater(len(result_objs), 1)
@unittest.skipIf(mongomock is None, 'mongomock package not present')
def test_aggregate(self):
collection = mongomock.MongoClient().db.collection
objs = [
{
'test_id': '1',
'test_status': 'success'
},
{
'test_id': '2',
'test_status': 'failure'
},
{
'test_id': '3',
'test_status': 'success'
}
]
collection.insert(objs)
aggregate_query = [
{"$match": {'test_status': 'success'}}
]
results = self.hook.aggregate(collection, aggregate_query)
results = [result for result in results]
self.assertEqual(len(results), 2)
def test_context_manager(self):
with MongoHook(conn_id='mongo_default', mongo_db='default') as ctxHook:
ctxHook.get_conn()
self.assertIsInstance(ctxHook, MongoHook)
self.assertIsNotNone(ctxHook.client)
self.assertIsNone(ctxHook.client)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import struct
import subprocess
import sys
import tempfile
import test_format
BUNDLE_SIZE = 32
def CreateElfContent(bits, text_segment):
e_ident = {
32: '\177ELF\1',
64: '\177ELF\2'}[bits]
e_machine = {
32: 3,
64: 62}[bits]
e_phoff = 256
e_phnum = 1
e_phentsize = 0
elf_header_fmt = {
32: '<16sHHIIIIIHHHHHH',
64: '<16sHHIQQQIHHHHHH'}[bits]
elf_header = struct.pack(
elf_header_fmt,
e_ident, 0, e_machine, 0, 0, e_phoff, 0, 0, 0,
e_phentsize, e_phnum, 0, 0, 0)
p_type = 1 # PT_LOAD
p_flags = 5 # r-x
p_filesz = len(text_segment)
p_memsz = p_filesz
p_vaddr = 0
p_offset = 512
p_align = 0
p_paddr = 0
pheader_fmt = {
32: '<IIIIIIII',
64: '<IIQQQQQQ'}[bits]
pheader_fields = {
32: (p_type, p_offset, p_vaddr, p_paddr,
p_filesz, p_memsz, p_flags, p_align),
64: (p_type, p_flags, p_offset, p_vaddr,
p_paddr, p_filesz, p_memsz, p_align)}[bits]
pheader = struct.pack(pheader_fmt, *pheader_fields)
result = elf_header
assert len(result) <= e_phoff
result += '\0' * (e_phoff - len(result))
result += pheader
assert len(result) <= p_offset
result += '\0' * (p_offset - len(result))
result += text_segment
return result
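# Layout of the image produced above (offsets are fixed by e_phoff and p_offset
# in CreateElfContent):
#   0x000  ELF header (32- or 64-bit, x86 or x86-64 machine type)
#   0x100  single PT_LOAD program header, flags r-x
#   0x200  the text segment itself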
def RunRdfaValidator(options, data):
# Add nops to make it bundle-sized.
data += (-len(data) % BUNDLE_SIZE) * '\x90'
assert len(data) % BUNDLE_SIZE == 0
# TODO(shcherbina): get rid of custom prefix once
# https://code.google.com/p/nativeclient/issues/detail?id=3631
# is actually fixed.
tmp = tempfile.NamedTemporaryFile(prefix='tmprdfa_', mode='wb', delete=False)
try:
tmp.write(CreateElfContent(options.bits, data))
tmp.close()
proc = subprocess.Popen([options.rdfaval, tmp.name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
assert stdout == '', stdout
return_code = proc.wait()
finally:
tmp.close()
os.remove(tmp.name)
# Remove the carriage return characters that we get on Windows.
stderr = stderr.replace('\r', '')
return return_code, stderr
def ParseRdfaMessages(stdout):
"""Get (offset, message) pairs from rdfa validator output.
Args:
stdout: Output of rdfa validator as string.
Yields:
Pairs (offset, message).
"""
for line in stdout.split('\n'):
line = line.strip()
if line == '':
continue
if re.match(r"(Valid|Invalid)\.$", line):
continue
m = re.match(r'([0-9a-f]+): (.*)$', line, re.IGNORECASE)
assert m is not None, "can't parse line '%s'" % line
offset = int(m.group(1), 16)
message = m.group(2)
if not message.startswith('warning - '):
yield offset, message
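# Illustrative input/output for ParseRdfaMessages (the message text is made up;
# only the "<hex offset>: <message>" shape matters):
#   "20: unrecognized instruction"  -> yields (0x20, 'unrecognized instruction')
#   "40: warning - something"       -> skipped (warnings are filtered out)
#   "Invalid."                      -> skipped (verdict lines carry no offset)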
def CheckValidJumpTargets(options, data_chunks):
"""
Check that the validator infers valid jump targets correctly.
This test checks that the validator identifies instruction boundaries and
    superinstructions correctly. To do that, it appends to the end of the given
    code a jump targeting each byte offset in turn. The jump should be valid if
    and only if it targets a boundary between data chunks.
Note that the same chunks as in RunRdfaWithNopPatching are used, but here they
play a different role. In RunRdfaWithNopPatching the partitioning into chunks
is only relevant when the whole snippet is invalid. Here, on the other hand,
we only care about valid snippets, and we use chunks to mark valid jump
targets.
Args:
options: Options as produced by optparse.
data_chunks: List of strings containing binary data. Each such chunk is
expected to correspond to indivisible instruction or superinstruction.
Returns:
None.
"""
data = ''.join(data_chunks)
# Add nops to make it bundle-sized.
data += (-len(data) % BUNDLE_SIZE) * '\x90'
assert len(data) % BUNDLE_SIZE == 0
# Since we check validity of jump target by adding jump and validating
# resulting piece, we rely on validity of original snippet.
return_code, _ = RunRdfaValidator(options, data)
assert return_code == 0, 'Can only validate jump targets on valid snippet'
valid_jump_targets = set()
pos = 0
for data_chunk in data_chunks:
valid_jump_targets.add(pos)
pos += len(data_chunk)
valid_jump_targets.add(pos)
for i in range(pos + 1):
# Encode JMP with 32-bit relative target.
jump = '\xe9' + struct.pack('<i', i - (len(data) + 5))
return_code, _ = RunRdfaValidator(options, data + jump)
if return_code == 0:
assert i in valid_jump_targets, (
'Offset 0x%x was reported valid jump target' % i)
else:
assert i not in valid_jump_targets, (
'Offset 0x%x was reported invalid jump target' % i)
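# Worked example for CheckValidJumpTargets (hypothetical chunks): with
# data_chunks = ['\x90', '\x66\x90'] the valid jump targets are offsets
# {0, 1, 3} -- the start of each chunk plus the end of the last one. Every
# appended JMP whose 32-bit relative target lands elsewhere must be rejected
# by the validator for the test to pass.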
class RdfaTestRunner(test_format.TestRunner):
SECTION_NAME = 'rdfa_output'
def CommandLineOptions(self, parser):
parser.add_option('--rdfaval', default='validator_test',
help='Path to the ncval validator executable')
def GetSectionContent(self, options, sections):
data_chunks = list(test_format.ParseHex(sections['hex']))
return_code, stdout = RunRdfaValidator(options, ''.join(data_chunks))
result = ''.join('%x: %s\n' % (offset, message)
for offset, message in ParseRdfaMessages(stdout))
result += 'return code: %d\n' % return_code
if return_code == 0:
print ' Checking jump targets...'
CheckValidJumpTargets(options, data_chunks)
return result
def main(argv):
RdfaTestRunner().Run(argv)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import decimal
import sys
import unittest
from parameterized import parameterized
from airflow.contrib.operators.mysql_to_gcs import \
MySqlToGoogleCloudStorageOperator
from tests.compat import mock
PY3 = sys.version_info[0] == 3
TASK_ID = 'test-mysql-to-gcs'
MYSQL_CONN_ID = 'mysql_conn_test'
TZ_QUERY = "SET time_zone = '+00:00'"
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'
CSV_FILENAME = 'test_{}.csv'
ROWS = [
('mock_row_content_1', 42),
('mock_row_content_2', 43),
('mock_row_content_3', 44)
]
CURSOR_DESCRIPTION = (
('some_str', 0, 0, 0, 0, 0, False),
('some_num', 1005, 0, 0, 0, 0, False)
)
NDJSON_LINES = [
b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
b'{"some_num": 44, "some_str": "mock_row_content_3"}\n'
]
CSV_LINES = [
b'some_str,some_num\r\n'
b'mock_row_content_1,42\r\n',
b'mock_row_content_2,43\r\n',
b'mock_row_content_3,44\r\n'
]
CSV_LINES_PIPE_DELIMITED = [
b'some_str|some_num\r\n'
b'mock_row_content_1|42\r\n',
b'mock_row_content_2|43\r\n',
b'mock_row_content_3|44\r\n'
]
SCHEMA_FILENAME = 'schema_test.json'
SCHEMA_JSON = [
b'[{"mode": "REQUIRED", "name": "some_str", "type": "FLOAT"}, ',
b'{"mode": "REQUIRED", "name": "some_num", "type": "STRING"}]'
]
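# How the fixtures above hang together: ROWS paired with CURSOR_DESCRIPTION
# ('some_str', 'some_num') is what the mocked MySQL cursor yields,
# NDJSON_LINES / CSV_LINES / CSV_LINES_PIPE_DELIMITED are the corresponding
# byte-for-byte expected file contents, and SCHEMA_JSON is the expected schema
# file content checked by test_schema_file below.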
class MySqlToGoogleCloudStorageOperatorTest(unittest.TestCase):
def test_init(self):
"""Test MySqlToGoogleCloudStorageOperator instance is properly initialized."""
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME,
export_format='CSV', field_delimiter='|')
self.assertEqual(op.task_id, TASK_ID)
self.assertEqual(op.sql, SQL)
self.assertEqual(op.bucket, BUCKET)
self.assertEqual(op.filename, JSON_FILENAME)
self.assertEqual(op.export_format, 'csv')
self.assertEqual(op.field_delimiter, '|')
@parameterized.expand([
("string", None, "string"),
(datetime.date(1970, 1, 2), None, 86400),
(datetime.datetime(1970, 1, 1, 1, 0), None, 3600),
(decimal.Decimal(5), None, 5),
(b"bytes", "BYTES", "Ynl0ZXM="),
])
def test_convert_type(self, value, schema_type, expected):
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
mysql_conn_id=MYSQL_CONN_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME)
self.assertEqual(
op.convert_type(value, schema_type),
expected)
@mock.patch('airflow.contrib.operators.mysql_to_gcs.MySqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_exec_success_json(self, gcs_hook_mock_class, mysql_hook_mock_class):
"""Test successful run of execute function for JSON"""
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
mysql_conn_id=MYSQL_CONN_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME)
mysql_hook_mock = mysql_hook_mock_class.return_value
mysql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mysql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual(JSON_FILENAME.format(0), obj)
self.assertEqual('application/json', mime_type)
self.assertFalse(gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(NDJSON_LINES), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mysql_hook_mock_class.assert_called_once_with(mysql_conn_id=MYSQL_CONN_ID)
mysql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.contrib.operators.mysql_to_gcs.MySqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_exec_success_csv(self, gcs_hook_mock_class, mysql_hook_mock_class):
"""Test successful run of execute function for CSV"""
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
mysql_conn_id=MYSQL_CONN_ID,
sql=SQL,
export_format='CSV',
bucket=BUCKET,
filename=CSV_FILENAME)
mysql_hook_mock = mysql_hook_mock_class.return_value
mysql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mysql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual(CSV_FILENAME.format(0), obj)
self.assertEqual('text/csv', mime_type)
self.assertFalse(gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(CSV_LINES), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mysql_hook_mock_class.assert_called_once_with(mysql_conn_id=MYSQL_CONN_ID)
mysql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.contrib.operators.mysql_to_gcs.MySqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_exec_success_csv_ensure_utc(self, gcs_hook_mock_class, mysql_hook_mock_class):
"""Test successful run of execute function for CSV"""
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
mysql_conn_id=MYSQL_CONN_ID,
sql=SQL,
export_format='CSV',
bucket=BUCKET,
filename=CSV_FILENAME,
ensure_utc=True)
mysql_hook_mock = mysql_hook_mock_class.return_value
mysql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mysql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual(CSV_FILENAME.format(0), obj)
self.assertEqual('text/csv', mime_type)
self.assertFalse(gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(CSV_LINES), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mysql_hook_mock_class.assert_called_once_with(mysql_conn_id=MYSQL_CONN_ID)
mysql_hook_mock.get_conn().cursor().execute.assert_has_calls([mock.call(TZ_QUERY), mock.call(SQL)])
@mock.patch('airflow.contrib.operators.mysql_to_gcs.MySqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_exec_success_csv_with_delimiter(self, gcs_hook_mock_class, mysql_hook_mock_class):
"""Test successful run of execute function for CSV with a field delimiter"""
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
mysql_conn_id=MYSQL_CONN_ID,
sql=SQL,
export_format='csv',
field_delimiter='|',
bucket=BUCKET,
filename=CSV_FILENAME)
mysql_hook_mock = mysql_hook_mock_class.return_value
mysql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mysql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual(CSV_FILENAME.format(0), obj)
self.assertEqual('text/csv', mime_type)
self.assertFalse(gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(CSV_LINES_PIPE_DELIMITED), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mysql_hook_mock_class.assert_called_once_with(mysql_conn_id=MYSQL_CONN_ID)
mysql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.contrib.operators.mysql_to_gcs.MySqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_file_splitting(self, gcs_hook_mock_class, mysql_hook_mock_class):
"""Test that ndjson is split by approx_max_file_size_bytes param."""
mysql_hook_mock = mysql_hook_mock_class.return_value
mysql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mysql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
expected_upload = {
JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
JSON_FILENAME.format(1): NDJSON_LINES[2],
}
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual('application/json', mime_type)
self.assertFalse(gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(expected_upload[obj], file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]))
op.execute(None)
@mock.patch('airflow.contrib.operators.mysql_to_gcs.MySqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_schema_file(self, gcs_hook_mock_class, mysql_hook_mock_class):
"""Test writing schema files."""
mysql_hook_mock = mysql_hook_mock_class.return_value
mysql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mysql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip): # pylint: disable=unused-argument
if obj == SCHEMA_FILENAME:
self.assertFalse(gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(SCHEMA_JSON), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MySqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
schema_filename=SCHEMA_FILENAME)
op.execute(None)
# once for the file and once for the schema
self.assertEqual(2, gcs_hook_mock.upload.call_count)
|
|
import numpy
import echidna.output as output
from echidna.errors.custom_errors import LimitError, CompatibilityError
from echidna.limit import summary
from echidna.output import store
import logging
import time
class Limit(object):
""" Class to handle main limit setting.
Args:
signal (:class:`echidna.core.spectra.Spectra`): signal spectrum you wish
to obtain a limit for.
        fitter (:class:`echidna.limit.fit.Fit`): The fitter used to set
a limit with.
shrink (bool, optional): If set to True, :meth:`shrink` method is
called on the signal spectrum before limit setting, shrinking to
ROI.
per_bin (bool, optional): If set to True, the values of the test
statistic over spectral dimensions (per bin) will be stored.
Attributes:
_signal (:class:`echidna.core.spectra.Spectra`): signal spectrum you wish
to obtain a limit for.
_fitter (:class:`echidna.limit.fit.Fit`): The fitter used to set a
a limit with.
_stats (:class:`numpy.ndarray`): Data container for test
statistic values.
_per_bin (bool): If set to True, the values of the test statistic
over spectral dimensions (per bin) will be stored.
"""
def __init__(self, signal, fitter, shrink=True, per_bin=False):
if ((per_bin and not fitter._per_bin) or
(not per_bin and fitter._per_bin)):
raise ValueError("Mismatch in per_bin flags. To use per_bin "
"effectively, both Fitter and Limit instances "
"should have per_bin enabled.\n fitter: %s\n "
"limit: %s" (fitter._per_bin, per_bin))
self._per_bin = per_bin
self._logger = logging.getLogger(name="Limit")
self._fitter = fitter
self._fitter.check_fit_config(signal)
self._fitter.set_signal(signal, shrink=shrink)
self._signal = signal
def get_array_limit(self, array, limit=2.71):
""" Get the limit from an array containing statisics
Args:
array (:class:`numpy.array`): The array you want to set a limit for.
            limit (float, optional): The value of the test statistic which
corresponds to the limit you want to set. The default is 2.71
which corresponds to 90% CL when using a chi-squared test
statistic.
Raises:
CompatibilityError: If the length of the array is not equal to the
number of signal scalings.
LimitError: If all values in the array are below limit.
Returns:
float: The signal scaling at the limit you are setting.
"""
counts = self._signal.get_fit_config().get_par("rate").get_values()
if len(counts) != len(array):
raise CompatibilityError("Array length and number of signal "
"scalings is different.")
i = 0
if not isinstance(array[0], float): # is array
array = self.sum_entries(array)
for entry in array:
if entry > limit:
return counts[i]
i += 1
raise LimitError("Unable to find limit. Max stat: %s, Limit: %s"
% (array[-1], limit))
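    # Minimal sketch of the scan above (values are made up): with rate values
    # counts = [0., 10., 20., 30.] and test-statistic values
    # array = [0.0, 1.2, 2.9, 4.1], the first entry above the default limit of
    # 2.71 is at index 2, so the method returns counts[2] == 20.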
def get_limit(self, limit=2.71, stat_zero=None, store_summary=True):
""" Get the limit using the signal spectrum.
Args:
            limit (float, optional): The value of the test statistic which
corresponds to the limit you want to set. The default is 2.71
which corresponds to 90% CL when using a chi-squared test
statistic.
stat_zero (float or :class:`numpy.ndarray`, optional): Enables
calculation of e.g. delta chi-squared. Include values of
test statistic for zero signal contribution, so these can be
subtracted from the values of the test statistic, with signal.
store_summary (bool, optional): If True (default) then a hdf5 file
is produced containing best fit values for systematics, total
delta chi-squared and penalty chi_squared of each systematic as a
function of signal scaling. The prior and sigma values used are
also stored. A log file is also produced for the values of best
fits and penalty chi_squared of each systematic,
total chi_squared, number of degrees of freedom and signal scaling
at the requested limit.
Raises:
TypeError: If stat_zero is not a numpy array, when per_bin is
enabled.
LimitError: If all values in the array are below limit.
Returns:
float: The signal scaling at the limit you are setting.
"""
par = self._signal.get_fit_config().get_par("rate")
# Create stats array
shape = self._signal.get_fit_config().get_shape()
stats = numpy.zeros(shape, dtype=numpy.float64)
        if stat_zero is not None:  # If supplied specific stat_zero use this
if self._per_bin:
if not isinstance(stat_zero, numpy.ndarray):
raise TypeError("For per_bin enabled, "
"stat_zero should be a numpy array")
min_per_bin = stat_zero
min_stat = numpy.sum(stat_zero)
else:
min_per_bin = None
min_stat = stat_zero
else: # check zero signal stat in case its not in self._stats
self._fitter.remove_signal()
min_stat = self._fitter.fit()
fit_results = self._fitter.get_fit_results()
minimum_position = fit_results.get_minimum_position()
# Get per_bin array getting stats at minimum position
min_per_bin = fit_results.get_stat(minimum_position)
# Create summary
scales = par.get_values()
summary_name = self._fitter.get_fit_config().get_name()
if self._per_bin: # want full Summary class
limit_summary = summary.Summary(
summary_name, len(scales),
spectra_config=self._signal.get_config(),
fit_config=self._fitter.get_fit_config())
else: # use ReducedSummary
limit_summary = summary.ReducedSummary(
summary_name, len(scales),
spectra_config=self._signal.get_config(),
fit_config=self._fitter.get_fit_config())
limit_summary.set_scales(scales)
# Set prior and sigma values
for par_name in self._fitter.get_fit_config().get_pars():
cur_par = self._fitter.get_fit_config().get_par(par_name)
limit_summary.set_prior(cur_par.get_prior(), par_name)
limit_summary.set_sigma(cur_par.get_sigma(), par_name)
# Loop through signal scalings
for i, scale in enumerate(par.get_values()):
self._logger.debug("signal scale: %.4g" % scale)
if not numpy.isclose(scale, 0.):
if self._fitter.get_signal() is None:
self._fitter.set_signal(self._signal, shrink=False)
self._signal.scale(scale)
else: # want no signal contribution
self._fitter.remove_signal()
self._logger.warning("Removing signal in fit for scale %.4g" %
scale)
stat = self._fitter.fit() # best-fit test statistic for this scale
stats[i] = stat
fit_results = self._fitter.get_fit_results() # get results
results_summary = fit_results.get_summary()
for par_name, value in results_summary.iteritems():
limit_summary.set_best_fit(value.get("best_fit"),
i, par_name)
limit_summary.set_penalty_term(value.get("penalty_term"),
i, par_name)
if self._per_bin:
minimum_position = fit_results.get_minimum_position()
# Get per_bin array getting stats at minimum position
min_per_bin = fit_results.get_raw_stat(minimum_position)
limit_summary.set_stat(min_per_bin, i)
else: # just use single stat
limit_summary.set_stat(stat, i)
# Find array minimum - use whichever is largest out of array min and
# previously calculated min_stat
if stats.min() > min_stat:
min_stat = stats.min()
if self._per_bin:
# Now we want the corresponding per_bin values
min_per_bin = limit_summary.get_raw_stat(stats.argmin())
# Convert stats to delta - subtracting minimum
stats -= min_stat
limit_summary.set_stats(limit_summary.get_raw_stats() - min_per_bin)
# Also want to know index of minimum
min_bin = numpy.argmin(stats)
try:
# Slice from min_bin upwards
log_text = ""
i_limit = numpy.where(stats[min_bin:] > limit)[0][0]
limit = par.get_values()[min_bin + i_limit]
limit_summary.set_limit(limit)
limit_summary.set_limit_idx(min_bin + i_limit)
log_text += "\n===== Limit Summary =====\nLimit found at:\n"
log_text += "Signal Decays: %.4g\n" % limit
for parameter in self._fitter.get_fit_config().get_pars():
log_text += "--- systematic: %s ---\n" % parameter
log_text += ("Best fit: %4g\n" %
limit_summary.get_best_fit(i_limit, parameter))
log_text += ("Prior: %.4g\n" %
limit_summary.get_prior(parameter))
log_text += ("Sigma: %.4g\n" %
limit_summary.get_sigma(parameter))
log_text += ("Penalty term: %.4g\n" %
limit_summary.get_penalty_term(i_limit,
parameter))
log_text += "----------------------------\n"
log_text += "Test statistic: %.4f\n" % stats[i_limit]
log_text += "N.D.F.: 1\n" # Only fit one dof currently
logging.getLogger("extra").info("\n%s\n" % log_text)
if (store_summary and self._fitter.get_fit_config() is not None):
timestamp = "%.f" % time.time() # seconds since epoch
path = output.__default_save_path__ + "/"
fname = limit_summary.get_name() + "_" + timestamp + ".hdf5"
store.dump_summary(path + fname, limit_summary)
store.dump(path + fname, self._fitter.get_data(),
append=True, group_name="data")
if self._fitter.get_fixed_background() is not None:
store.dump(path + fname,
self._fitter.get_fixed_background(),
append=True, group_name="fixed")
for background in self._fitter.get_floating_backgrounds():
store.dump(path + fname, background, append=True,
group_name=background.get_name())
store.dump(path + fname, self._signal,
append=True, group_name="signal")
self._logger.info("Saved summary of %s to file %s" %
(limit_summary.get_name(), path + fname))
return limit
except IndexError as detail:
# Slice from min_bin upwards
log_text = ""
i_limit = numpy.argmax(stats[min_bin:])
limit = par.get_values()[min_bin + i_limit]
log_text += "\n===== Limit Summary =====\nNo limit found:\n"
log_text += "Signal Decays (at max stat): %.4g\n" % limit
for parameter in self._fitter.get_fit_config().get_pars():
log_text += "--- systematic: %s ---\n" % parameter
log_text += ("Best fit: %4g\n" %
limit_summary.get_best_fit(i_limit, parameter))
log_text += ("Prior: %.4g\n" %
limit_summary.get_prior(parameter))
log_text += ("Sigma: %.4g\n" %
limit_summary.get_sigma(parameter))
log_text += ("Penalty term: %.4g\n" %
limit_summary.get_penalty_term(i_limit,
parameter))
log_text += "----------------------------\n"
log_text += "Test statistic: %.4f\n" % stats[i_limit]
log_text += "N.D.F.: 1\n" # Only fit one dof currently
logging.getLogger("extra").info("\n%s" % log_text)
if (store_summary and self._fitter.get_fit_config() is not None):
timestamp = "%.f" % time.time() # seconds since epoch
path = output.__default_save_path__ + "/"
fname = limit_summary.get_name() + "_" + timestamp + ".hdf5"
store.dump_summary(path + fname, limit_summary)
store.dump(path + fname, self._fitter.get_data(),
append=True, group_name="data")
if self._fitter.get_fixed_background() is not None:
store.dump(path + fname,
self._fitter.get_fixed_background(),
append=True, group_name="fixed")
for background in self._fitter.get_floating_backgrounds():
store.dump(path + fname, background, append=True,
group_name=background.get_name())
store.dump(path + fname, self._signal,
append=True, group_name="signal")
self._logger.info("Saved summary of %s to file %s" %
(limit_summary.get_name(), fname))
self._logger.error("Recieived: IndexError: %s" % detail)
raise LimitError("Unable to find limit. Max stat: %s, Limit: %s"
                             % (stats.max(), limit))
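    # A typical call, assuming `signal` and `fitter` have been configured as in
    # the class docstring (names are placeholders):
    #
    #     limit_setter = Limit(signal, fitter, shrink=True, per_bin=False)
    #     scaling_at_90cl = limit_setter.get_limit(limit=2.71)
    #
    # 2.71 corresponds to a 90% CL for a chi-squared test statistic with one
    # degree of freedom, as noted in the docstring above.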
def get_statistics(self):
""" Get the test statistics for all signal scalings.
Returns:
            :class:`numpy.array`: Of test statistics for all signal scalings.
"""
signal_config = self._signal.get_fit_config()
stats = []
for scale in signal_config.get_par("rate").get_values():
if not numpy.isclose(scale, 0.):
self._signal.scale(scale)
self._fitter.set_signal(self._signal, shrink=False)
else:
self._fitter.remove_signal()
stats.append(self._fitter.fit())
return numpy.array(stats)
def sum_entries(self, array):
""" Sums entries of an array which contains arrays as entries.
Args:
array (:class:`numpy.array`): The array you want to sum the
elements of.
Returns:
:class:`numpy.array`: The input array with its entries summed.
"""
new_array = []
for entry in array:
new_array.append(entry.sum())
return numpy.array(new_array)
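    # For example (illustrative), sum_entries applied to
    # numpy.array([[1., 2.], [3., 4.]]) returns array([3., 7.]), i.e. the
    # per-scaling totals of per-bin test-statistic values.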
|
|
import datetime
from pprint import pprint
from bs4 import BeautifulSoup
from dateutil.parser import *
from urllib.request import urlopen
from DBManager import DBManager
def calc_timedelta(fastest_time, driver_time):
try:
fastest = datetime.datetime.strptime(fastest_time, "%M:%S.%f")
driver_best = datetime.datetime.strptime(driver_time, "%M:%S.%f")
timedelta = driver_best - fastest
        timedelta_string = '+' + str(timedelta.seconds) + '.' \
                           + str(timedelta.microseconds).zfill(6)[0:3]
return timedelta_string
except ValueError:
return ""
class Scraper:
@staticmethod
def scrape_practice_results(url):
# JSON to be populated with scraped results
practice_json = {"timesheet": []}
# Opening provided URL to be scraped
r = urlopen(url).read()
soup = BeautifulSoup(r, "html.parser")
# Finding table of session results as a list
table = soup.find('table', attrs={'class': 'standing-table__table'})
cells = table.findAll('tr', attrs={'class': 'standing-table__row'})
# Fastest time of the session to calculate time delta
fastest_time = ""
# For each row in the results table
for c in cells[1:]:
entry = {}
# Scraping name of driver
nameObj = c.find('span')
nameJSON = nameObj.get_text()
entry["name"] = nameJSON
# Find other elements
otherObj = c.findAll('td')
# Scraping team of driver
teamJSON = otherObj[2].get_text()
entry["team"] = teamJSON
# Scraping position of driver in session
posJSON = otherObj[0].get_text()
entry["position"] = posJSON
# Scraping best time for driver in session
timeJSON = otherObj[3].get_text()
entry["time"] = timeJSON
if c == cells[1]:
fastest_time = timeJSON
# Calculating and adding time delta from fastest time to json
entry["timedelta"] = calc_timedelta(fastest_time, timeJSON)
            # Adding the scraped data as an entry to the timesheet JSON list
practice_json["timesheet"].append(entry)
return practice_json
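    # Each timesheet entry produced above has the shape (values illustrative):
    #   {"name": "Driver Name", "team": "Team Name", "position": "1",
    #    "time": "1:24.220", "timedelta": ...}
    # with "timedelta" computed relative to the session's fastest time, so the
    # leader's own delta is effectively zero.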
@staticmethod
def scrape_qualifying_results(url):
# JSON to be populated with scraped results
qualifying_json = {"timesheet": []}
# Opening provided URL to be scraped
r = urlopen(url).read()
soup = BeautifulSoup(r, "html.parser")
# Finding table of session results as a list
table = soup.find('table', attrs={'class': 'standing-table__table'})
cells = table.findAll('tr', attrs={'class': 'standing-table__row'})
        # Fastest time of the session to calculate time delta
fastest_time = ""
# For each row in the results table
for c in cells[1:]:
entry = {}
# Scraping name of driver
nameObj = c.find('span')
nameJSON = nameObj.get_text()
entry["name"] = nameJSON
# Find other elements
otherObj = c.findAll('td')
# Scraping team of driver
teamJSON = otherObj[2].get_text()
entry["team"] = teamJSON
# Scraping position of driver in session
otherObj = c.findAll('td')
posJSON = otherObj[0].get_text()
entry["position"] = posJSON
# Scraping best time for driver in session
timeJSON = otherObj[5].get_text()
entry["time"] = timeJSON
if c == cells[1]:
fastest_time = timeJSON
# Calculating and adding time delta from fastest time to json
entry["timedelta"] = calc_timedelta(fastest_time, timeJSON)
qualifying_json["timesheet"].append(entry)
return qualifying_json
@staticmethod
def scrape_race_results(url):
# JSON to be populated with scraped results
race_json = {"timesheet": []}
# Opening provided URL to be scraped
r = urlopen(url).read()
soup = BeautifulSoup(r, "html.parser")
# Finding table of session results as a list
table = soup.find('table', attrs={'class': 'standing-table__table'})
cells = table.findAll('tr', attrs={'class': 'standing-table__row'})
# For each row in the results table
for c in cells[1:]:
entry = {}
# Scraping name of driver
nameObj = c.find('span')
nameJSON = nameObj.get_text()
entry["name"] = nameJSON
# Find other elements
otherObj = c.findAll('td')
# Scraping team of driver
teamJSON = otherObj[2].get_text()
entry["team"] = teamJSON
# Scraping position of driver in session
posJSON = otherObj[0].get_text()
entry["position"] = posJSON
# Scraping best time for driver in session
timeJSON = otherObj[5].get_text()
entry["timedelta"] = timeJSON
# Scraping points gained from the race for the driver
pointsJSON = otherObj[7].get_text()
entry["pointsGained"] = pointsJSON
race_json["timesheet"].append(entry)
return race_json
@staticmethod
def scrape_showtimes(year, url):
# JSON to be populated with scraped results
showtimes_json = {}
# Opening provided URL to be scraped
r = urlopen(url).read()
soup = BeautifulSoup(r, "html.parser")
days = soup.findAll('h3', attrs={'class': 'text-h4 -rs-style20 box'})
for d in days:
# Obtaining day of race weekend
session_date = d.get_text() + '-' + year
# Finding table of session results as a list for the particular day
day_sessions_obj = d.find_next_sibling()
cells = day_sessions_obj.findAll('div', attrs={'class':
'event-group -layout2'})
# For each row in the results table
for c in cells:
# Obtaining session name
session_name_obj = c.find('strong')
session_name_raw = session_name_obj.get_text()
session_name = session_name_raw.split('- ')[-1]
session_name = session_name.lower()
session_name = session_name.replace("practice ", "fp")
session_name = session_name.replace("qualifying ", "q")
session_name = session_name.replace(" ", "_")
# Obtaining session time, e.g. 09:00:00
session_time_obj = c.find('p', attrs={
'class': 'event-detail -center caption'})
session_time_raw = session_time_obj.get_text()
session_time = session_time_raw.split('(')[-1][:-1] + ":00"
# Obtaining session iso datetime, e.g. 2017-08-25T09:00:00
session_datetime = session_date + ' ' + session_time
session_datetime = parse(session_datetime).isoformat()
# Populating results json with session name and datetime
showtimes_json[session_name] = session_datetime
return showtimes_json
@staticmethod
def test():
json = Scraper.scrape_practice_results(
"http://www.skysports.com/f1/grandprix/"
"australia/results/2017/practice-1")
# pprint(json)
if __name__ == '__main__':
Scraper.test()
pass
|
|
from DistributedMinigameAI import *
from toontown.ai.ToonBarrier import *
from direct.fsm import ClassicFSM, State
from direct.directnotify import DirectNotifyGlobal
from toontown.minigame import ToonBlitzGlobals
from math import sqrt
class DistributedTwoDGameAI(DistributedMinigameAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTwoDGameAI')
def __init__(self, air, minigameId):
try:
self.DistributedTwoDGameAI_initialized
except:
            self.DistributedTwoDGameAI_initialized = 1
DistributedMinigameAI.__init__(self, air, minigameId)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedTwoDGameAI', [State.State('inactive', self.enterInactive, self.exitInactive, ['play']), State.State('play', self.enterPlay, self.exitPlay, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['inactive'])], 'inactive', 'inactive')
self.addChildGameFSM(self.gameFSM)
self.finishedBonusDict = {}
self.finishedTimeLeftDict = {}
self.numFallDownDict = {}
self.numHitByEnemyDict = {}
self.numSquishDict = {}
self.treasuresCollectedDict = {}
self.sectionsSelected = []
self.enemyHealthTable = []
self.treasureTakenTable = []
self.sectionIndexList = []
def generate(self):
self.notify.debug('generate')
DistributedMinigameAI.generate(self)
def delete(self):
self.notify.debug('delete')
del self.gameFSM
DistributedMinigameAI.delete(self)
def setTrolleyZone(self, trolleyZone):
DistributedMinigameAI.setTrolleyZone(self, trolleyZone)
self.setupSections()
def setGameReady(self):
self.notify.debug('setGameReady')
DistributedMinigameAI.setGameReady(self)
self.numTreasures = ToonBlitzGlobals.NumTreasures
self.numEnemies = ToonBlitzGlobals.NumEnemies
self.numTreasuresTaken = 0
self.numEnemiesKilled = 0
for avId in self.scoreDict.keys():
self.scoreDict[avId] = 0
self.finishedBonusDict[avId] = 0
self.finishedTimeLeftDict[avId] = -1
self.numFallDownDict[avId] = 0
self.numHitByEnemyDict[avId] = 0
self.numSquishDict[avId] = 0
self.treasuresCollectedDict[avId] = [0,
0,
0,
0]
for i in xrange(len(self.sectionsSelected)):
sectionIndex = self.sectionsSelected[i][0]
attribs = ToonBlitzGlobals.SectionTypes[sectionIndex]
enemiesPool = attribs[3]
self.enemyHealthTable += [[]]
enemyIndicesSelected = self.sectionsSelected[i][1]
for j in xrange(len(enemyIndicesSelected)):
enemyIndex = enemyIndicesSelected[j]
enemyType = enemiesPool[enemyIndex][0]
self.enemyHealthTable[i] += [ToonBlitzGlobals.EnemyBaseHealth]
self.enemyHealthTable[i][j] *= self.numPlayers
if enemyType in ToonBlitzGlobals.EnemyHealthMultiplier:
self.enemyHealthTable[i][j] *= ToonBlitzGlobals.EnemyHealthMultiplier[enemyType]
self.treasureTakenTable += [[]]
treasureIndicesSelected = self.sectionsSelected[i][2]
for j in xrange(len(treasureIndicesSelected)):
self.treasureTakenTable[i] += [0]
enemyIndicesSelected = self.sectionsSelected[i][1]
for j in xrange(len(enemyIndicesSelected)):
self.treasureTakenTable[i] += [0]
def setGameStart(self, timestamp):
self.notify.debug('setGameStart')
DistributedMinigameAI.setGameStart(self, timestamp)
self.gameFSM.request('play')
def setGameAbort(self):
self.notify.debug('setGameAbort')
if self.gameFSM.getCurrentState():
self.gameFSM.request('cleanup')
DistributedMinigameAI.setGameAbort(self)
def gameOver(self):
self.notify.debug('gameOver')
scoreList = []
finishedBonusList = []
timeLeftList = []
treasureCollectedList = []
playerErrorList = []
for avId in self.avIdList:
scoreList.append(self.scoreDict[avId])
finishedBonusList.append(self.finishedBonusDict[avId])
timeLeftList.append(self.finishedTimeLeftDict[avId])
treasureCollectedList.append(self.treasuresCollectedDict[avId])
playerError = [self.numFallDownDict[avId], self.numHitByEnemyDict[avId], self.numSquishDict[avId]]
playerErrorList.append(playerError)
self.scoreDict[avId] = max(0, self.scoreDict[avId])
jellybeans = sqrt(self.scoreDict[avId] * ToonBlitzGlobals.ScoreToJellyBeansMultiplier)
self.scoreDict[avId] = max(1, int(jellybeans))
#self.air.writeServerEvent('minigame_twoD', self.doId, '%s|%s|%s|%s|%s|%s|%s|%s|%s' % (ToontownGlobals.TwoDGameId,
# self.getSafezoneId(),
# self.avIdList,
# scoreList,
# finishedBonusList,
# timeLeftList,
# treasureCollectedList,
# playerErrorList,
# self.sectionIndexList))
# jjkoletar: not interested
self.notify.debug('minigame_twoD%s: %s|%s|%s|%s|%s|%s|%s|%s|%s' % (self.doId,
ToontownGlobals.TwoDGameId,
self.getSafezoneId(),
self.avIdList,
scoreList,
finishedBonusList,
timeLeftList,
treasureCollectedList,
playerErrorList,
self.sectionIndexList))
self.gameFSM.request('cleanup')
DistributedMinigameAI.gameOver(self)
def enterInactive(self):
self.notify.debug('enterInactive')
def exitInactive(self):
pass
def enterPlay(self):
self.notify.debug('enterPlay')
def allToonsDone(self = self):
self.notify.debug('allToonsDone')
self.sendUpdate('setEveryoneDone')
if not ToonBlitzGlobals.EndlessGame:
self.gameOver()
def handleTimeout(avIds, self = self):
self.notify.debug('handleTimeout: avatars %s did not report "done"' % avIds)
self.setGameAbort()
self.doneBarrier = ToonBarrier('waitClientsDone', self.uniqueName('waitClientsDone'), self.avIdList, ToonBlitzGlobals.GameDuration[self.getSafezoneId()] + ToonBlitzGlobals.ShowScoresDuration + MinigameGlobals.latencyTolerance, allToonsDone, handleTimeout)
def exitPlay(self):
pass
def enterCleanup(self):
self.notify.debug('enterCleanup')
self.doneBarrier.cleanup()
del self.doneBarrier
self.gameFSM.request('inactive')
def exitCleanup(self):
pass
def claimTreasure(self, sectionIndex, treasureIndex):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('treasure %s-%s claimed by %s' % (sectionIndex, treasureIndex, avId))
if sectionIndex < 0 or sectionIndex >= len(self.sectionsSelected):
self.air.writeServerEvent('warning', sectionIndex, 'TwoDGameAI.claimTreasure sectionIndex out of range.')
return
if treasureIndex < 0 or treasureIndex >= len(self.treasureTakenTable[sectionIndex]):
self.notify.warning('Treasure %s: TwoDGameAI.claimTreasure treasureIndex out of range.' % treasureIndex)
self.air.writeServerEvent('warning', treasureIndex, 'TwoDGameAI.claimTreasure treasureIndex out of range.')
return
if self.treasureTakenTable[sectionIndex][treasureIndex]:
return
initialTreasureList = self.sectionsSelected[sectionIndex][2]
if treasureIndex < len(initialTreasureList):
treasureValue = initialTreasureList[treasureIndex][1]
else:
treasureValue = self.numPlayers
self.treasureTakenTable[sectionIndex][treasureIndex] = treasureValue
self.treasuresCollectedDict[avId][treasureValue - 1] += 1
self.scoreDict[avId] += ToonBlitzGlobals.ScoreGainPerTreasure * treasureValue
self.numTreasuresTaken += 1
self.sendUpdate('setTreasureGrabbed', [avId, sectionIndex, treasureIndex])
def claimEnemyShot(self, sectionIndex, enemyIndex):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('enemy %s-%s shot claimed by %s' % (sectionIndex, enemyIndex, avId))
if sectionIndex < 0 or sectionIndex >= len(self.sectionsSelected):
self.air.writeServerEvent('warning', sectionIndex, 'TwoDGameAI.claimEnemyShot sectionIndex out of range.')
return
if enemyIndex < 0 or enemyIndex >= len(self.sectionsSelected[sectionIndex][1]):
self.air.writeServerEvent('warning', enemyIndex, 'TwoDGameAI.claimEnemyShot enemyIndex out of range.')
return
if self.enemyHealthTable[sectionIndex][enemyIndex] > 0:
self.enemyHealthTable[sectionIndex][enemyIndex] -= ToonBlitzGlobals.DamagePerBullet
if self.enemyHealthTable[sectionIndex][enemyIndex] <= 0:
self.numEnemiesKilled += 1
self.sendUpdate('setEnemyShot', [avId,
sectionIndex,
enemyIndex,
self.enemyHealthTable[sectionIndex][enemyIndex]])
def reportDone(self):
if self.gameFSM.getCurrentState() == None or self.gameFSM.getCurrentState().getName() != 'play':
return
avId = self.air.getAvatarIdFromSender()
self.notify.debug('reportDone: avatar %s is done' % avId)
self.doneBarrier.clear(avId)
return
def toonVictory(self, avId, timestamp):
if self.gameFSM.getCurrentState() == None or self.gameFSM.getCurrentState().getName() != 'play':
msg = 'TwoDGameAI.toonVictory not in play state!'
self.notify.warning('suspicious: ' + str(avId) + ' ' + msg)
self.air.writeServerEvent('suspicious', avId=avId, issue=msg)
return
if avId not in self.scoreDict.keys():
self.notify.warning('Avatar %s not in list.' % avId)
self.air.writeServerEvent('suspicious', avId=avId, issue='TwoDGameAI.toonVictory toon not in list.')
return
curTime = self.getCurrentGameTime()
timeLeft = ToonBlitzGlobals.GameDuration[self.getSafezoneId()] - curTime
self.notify.debug('curTime =%s timeLeft = %s' % (curTime, timeLeft))
addBonus = int(ToonBlitzGlobals.BaseBonusOnCompletion[self.getSafezoneId()] + ToonBlitzGlobals.BonusPerSecondLeft * timeLeft)
self.notify.debug('addBonus = %d' % addBonus)
if addBonus < 0:
addBonus = 0
self.finishedBonusDict[avId] = addBonus
timeLeftStr = '%.1f' % timeLeft
self.finishedTimeLeftDict[avId] = timeLeftStr
self.scoreDict[avId] += addBonus
self.sendUpdate('addVictoryScore', [avId, addBonus])
self.doneBarrier.clear(avId)
return
def toonFellDown(self, avId, timestamp):
if avId not in self.scoreDict.keys():
self.notify.warning('Avatar %s not in list.' % avId)
self.air.writeServerEvent('warning', avId=avId, issue='TwoDGameAI.toonFellDown toon not in list.')
return
self.numFallDownDict[avId] += 1
self.scoreDict[avId] += ToonBlitzGlobals.ScoreLossPerFallDown[self.getSafezoneId()]
def toonHitByEnemy(self, avId, timestamp):
if avId not in self.scoreDict.keys():
self.notify.warning('Avatar %s not in list.' % avId)
self.air.writeServerEvent('warning', avId=avId, issue='TwoDGameAI.toonHitByEnemy toon not in list.')
return
self.numHitByEnemyDict[avId] += 1
self.scoreDict[avId] += ToonBlitzGlobals.ScoreLossPerEnemyCollision[self.getSafezoneId()]
def toonSquished(self, avId, timestamp):
if avId not in self.scoreDict.keys():
self.notify.warning('Avatar %s not in list.' % avId)
self.air.writeServerEvent('warning', avId=avId, issue='TwoDGameAI.toonSquished toon not in list.')
return
self.numSquishDict[avId] += 1
self.scoreDict[avId] += ToonBlitzGlobals.ScoreLossPerStomperSquish[self.getSafezoneId()]
def setupSections(self):
szId = self.getSafezoneId()
sectionWeights = ToonBlitzGlobals.SectionWeights[szId]
numSections = ToonBlitzGlobals.NumSections[szId]
difficultyPool = []
difficultyList = []
sectionsPool = ToonBlitzGlobals.SectionsPool
sectionTypes = ToonBlitzGlobals.SectionTypes
sectionsPoolByDifficulty = [[],
[],
[],
[],
[],
[]]
sectionsSelectedByDifficulty = [[],
[],
[],
[],
[],
[]]
sectionIndicesSelected = []
for weight in sectionWeights:
difficulty, probability = weight
difficultyPool += [difficulty] * probability
for i in xrange(numSections):
difficulty = random.choice(difficultyPool)
difficultyList.append(difficulty)
difficultyList.sort()
for sectionIndex in sectionsPool:
difficulty = sectionTypes[sectionIndex][0]
sectionsPoolByDifficulty[difficulty] += [sectionIndex]
for targetDifficulty in difficultyList:
whileCount = 0
difficulty = targetDifficulty
while not len(sectionsPoolByDifficulty[difficulty]) > 0:
difficulty += 1
if difficulty >= 5:
difficulty = 0
whileCount += 1
if whileCount > 1:
break
else:
sectionIndexChoice = random.choice(sectionsPoolByDifficulty[difficulty])
sectionsSelectedByDifficulty[difficulty] += [sectionIndexChoice]
sectionsPoolByDifficulty[difficulty].remove(sectionIndexChoice)
if whileCount > 1:
self.notify.debug('We need more sections than we have choices. We have to now repeat.')
for i in xrange(len(sectionsSelectedByDifficulty)):
for j in xrange(len(sectionsSelectedByDifficulty[i])):
sectionIndicesSelected.append(sectionsSelectedByDifficulty[i][j])
for i in xrange(len(sectionIndicesSelected)):
sectionIndex = sectionIndicesSelected[i]
self.sectionIndexList.append(sectionIndex)
attribs = sectionTypes[sectionIndex]
difficulty = attribs[0]
length = attribs[1]
blocksPool = attribs[2]
enemiesPool = attribs[3]
treasuresPool = attribs[4]
spawnPointsPool = attribs[5]
stompersPool = attribs[6]
enemyIndicesPool = []
enemyIndicesSelected = []
if enemiesPool != None:
minEnemies, maxEnemies = attribs[7]
for i in xrange(len(enemiesPool)):
enemyIndicesPool += [i]
numEnemies = maxEnemies * ToonBlitzGlobals.PercentMaxEnemies[szId] / 100
numEnemies = max(numEnemies, minEnemies)
for j in xrange(int(numEnemies)):
if len(enemyIndicesPool) == 0:
break
enemyIndex = random.choice(enemyIndicesPool)
enemyIndicesSelected.append(enemyIndex)
enemyIndicesPool.remove(enemyIndex)
enemyIndicesSelected.sort()
treasureIndicesPool = []
treasureValuePool = []
for value in range(1, 5):
treasureValuePool += [value] * ToonBlitzGlobals.TreasureValueProbability[value]
treasureIndicesSelected = []
if treasuresPool != None:
minTreasures, maxTreasures = attribs[8]
for i in xrange(len(treasuresPool)):
treasureIndicesPool += [i]
numTreasures = maxTreasures * ToonBlitzGlobals.PercentMaxTreasures[szId] / 100
numTreasures = max(numTreasures, minTreasures)
for i in xrange(int(numTreasures)):
if len(treasureIndicesPool) == 0:
break
treasureIndex = random.choice(treasureIndicesPool)
treasureValue = random.choice(treasureValuePool)
treasure = (treasureIndex, treasureValue)
treasureIndicesPool.remove(treasureIndex)
treasureIndicesSelected.append(treasure)
treasureIndicesSelected.sort()
spawnPointIndicesPool = []
spawnPointIndicesSelected = []
if spawnPointsPool != None:
minSpawnPoints, maxSpawnPoints = attribs[9]
for i in xrange(len(spawnPointsPool)):
spawnPointIndicesPool += [i]
numSpawnPoints = maxSpawnPoints * ToonBlitzGlobals.PercentMaxSpawnPoints[szId] / 100
numSpawnPoints = max(numSpawnPoints, minSpawnPoints)
for i in xrange(int(numSpawnPoints)):
if len(spawnPointIndicesPool) == 0:
break
spawnPoint = random.choice(spawnPointIndicesPool)
spawnPointIndicesSelected.append(spawnPoint)
spawnPointIndicesPool.remove(spawnPoint)
spawnPointIndicesSelected.sort()
stomperIndicesPool = []
stomperIndicesSelected = []
if stompersPool != None:
minStompers, maxStompers = attribs[10]
for i in xrange(len(stompersPool)):
stomperIndicesPool += [i]
numStompers = maxStompers * ToonBlitzGlobals.PercentMaxStompers[szId] / 100
numStompers = max(numStompers, minStompers)
for i in xrange(int(numStompers)):
if len(stomperIndicesPool) == 0:
break
stomper = random.choice(stomperIndicesPool)
stomperIndicesSelected.append(stomper)
stomperIndicesPool.remove(stomper)
stomperIndicesSelected.sort()
sectionTuple = (sectionIndex,
enemyIndicesSelected,
treasureIndicesSelected,
spawnPointIndicesSelected,
stomperIndicesSelected)
self.sectionsSelected.append(sectionTuple)
return
def getSectionsSelected(self):
return self.sectionsSelected
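# A minimal, standalone sketch of the weighted-selection technique used in
# setupSections above: each (difficulty, probability) pair is expanded into a
# pool containing `probability` copies of `difficulty`, so random.choice over
# the pool picks difficulties with the desired relative frequencies. The
# weights below are made up for illustration only.
def _weighted_choice_demo():
    import random
    sectionWeights = [(0, 5), (1, 3), (2, 2)]  # (difficulty, weight)
    pool = []
    for difficulty, probability in sectionWeights:
        pool += [difficulty] * probability
    # pool == [0, 0, 0, 0, 0, 1, 1, 1, 2, 2]
    return random.choice(pool)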
|
|
# pylint: disable=function-redefined
from typing import Dict, Union
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import Buffer, SelectionType, indent, unindent
from prompt_toolkit.completion import CompleteEvent
from prompt_toolkit.filters import (
Condition,
emacs_insert_mode,
emacs_mode,
has_arg,
has_selection,
in_paste_mode,
is_multiline,
is_read_only,
shift_selection_mode,
vi_search_direction_reversed,
)
from prompt_toolkit.key_binding.key_bindings import Binding
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.keys import Keys
from ..key_bindings import ConditionalKeyBindings, KeyBindings, KeyBindingsBase
from .named_commands import get_by_name
__all__ = [
"load_emacs_bindings",
"load_emacs_search_bindings",
"load_emacs_shift_selection_bindings",
]
E = KeyPressEvent
def load_emacs_bindings() -> KeyBindingsBase:
"""
Some Emacs extensions.
"""
# Overview of Readline emacs commands:
# http://www.catonmat.net/download/readline-emacs-editing-mode-cheat-sheet.pdf
key_bindings = KeyBindings()
handle = key_bindings.add
insert_mode = emacs_insert_mode
@handle("escape")
def _esc(event: E) -> None:
"""
By default, ignore escape key.
(If we don't put this here, and Esc is followed by a key whose sequence
is not handled, we'll insert an Escape character in the input stream,
something we don't want and which happens too easily in emacs mode.
Further, people can always use ControlQ to do a quoted insert.)
"""
pass
handle("c-a")(get_by_name("beginning-of-line"))
handle("c-b")(get_by_name("backward-char"))
handle("c-delete", filter=insert_mode)(get_by_name("kill-word"))
handle("c-e")(get_by_name("end-of-line"))
handle("c-f")(get_by_name("forward-char"))
handle("c-left")(get_by_name("backward-word"))
handle("c-right")(get_by_name("forward-word"))
handle("c-x", "r", "y", filter=insert_mode)(get_by_name("yank"))
handle("c-y", filter=insert_mode)(get_by_name("yank"))
handle("escape", "b")(get_by_name("backward-word"))
handle("escape", "c", filter=insert_mode)(get_by_name("capitalize-word"))
handle("escape", "d", filter=insert_mode)(get_by_name("kill-word"))
handle("escape", "f")(get_by_name("forward-word"))
handle("escape", "l", filter=insert_mode)(get_by_name("downcase-word"))
handle("escape", "u", filter=insert_mode)(get_by_name("uppercase-word"))
handle("escape", "y", filter=insert_mode)(get_by_name("yank-pop"))
handle("escape", "backspace", filter=insert_mode)(get_by_name("backward-kill-word"))
handle("escape", "\\", filter=insert_mode)(get_by_name("delete-horizontal-space"))
handle("c-home")(get_by_name("beginning-of-buffer"))
handle("c-end")(get_by_name("end-of-buffer"))
handle("c-_", save_before=(lambda e: False), filter=insert_mode)(
get_by_name("undo")
)
handle("c-x", "c-u", save_before=(lambda e: False), filter=insert_mode)(
get_by_name("undo")
)
handle("escape", "<", filter=~has_selection)(get_by_name("beginning-of-history"))
handle("escape", ">", filter=~has_selection)(get_by_name("end-of-history"))
handle("escape", ".", filter=insert_mode)(get_by_name("yank-last-arg"))
handle("escape", "_", filter=insert_mode)(get_by_name("yank-last-arg"))
handle("escape", "c-y", filter=insert_mode)(get_by_name("yank-nth-arg"))
handle("escape", "#", filter=insert_mode)(get_by_name("insert-comment"))
handle("c-o")(get_by_name("operate-and-get-next"))
# ControlQ does a quoted insert. Note that for vt100 terminals, you have to
# disable flow control by running ``stty -ixon``, otherwise Ctrl-Q and
# Ctrl-S are captured by the terminal.
handle("c-q", filter=~has_selection)(get_by_name("quoted-insert"))
handle("c-x", "(")(get_by_name("start-kbd-macro"))
handle("c-x", ")")(get_by_name("end-kbd-macro"))
handle("c-x", "e")(get_by_name("call-last-kbd-macro"))
@handle("c-n")
def _next(event: E) -> None:
" Next line. "
event.current_buffer.auto_down()
@handle("c-p")
def _prev(event: E) -> None:
" Previous line. "
event.current_buffer.auto_up(count=event.arg)
def handle_digit(c: str) -> None:
"""
Handle input of arguments.
The first number needs to be preceded by escape.
"""
@handle(c, filter=has_arg)
@handle("escape", c)
def _(event: E) -> None:
event.append_to_arg_count(c)
for c in "0123456789":
handle_digit(c)
@handle("escape", "-", filter=~has_arg)
def _meta_dash(event: E) -> None:
"""
"""
if event._arg is None:
event.append_to_arg_count("-")
@handle("-", filter=Condition(lambda: get_app().key_processor.arg == "-"))
def _dash(event: E) -> None:
"""
When '-' is typed again, after exactly '-' has been given as an
argument, ignore this.
"""
event.app.key_processor.arg = "-"
@Condition
def is_returnable() -> bool:
return get_app().current_buffer.is_returnable
# Meta + Enter: always accept input.
handle("escape", "enter", filter=insert_mode & is_returnable)(
get_by_name("accept-line")
)
# Enter: accept input in single line mode.
handle("enter", filter=insert_mode & is_returnable & ~is_multiline)(
get_by_name("accept-line")
)
def character_search(buff: Buffer, char: str, count: int) -> None:
if count < 0:
match = buff.document.find_backwards(
char, in_current_line=True, count=-count
)
else:
match = buff.document.find(char, in_current_line=True, count=count)
if match is not None:
buff.cursor_position += match
@handle("c-]", Keys.Any)
def _goto_char(event: E) -> None:
" When Ctl-] + a character is pressed. go to that character. "
# Also named 'character-search'
character_search(event.current_buffer, event.data, event.arg)
@handle("escape", "c-]", Keys.Any)
def _goto_char_backwards(event: E) -> None:
" Like Ctl-], but backwards. "
# Also named 'character-search-backward'
character_search(event.current_buffer, event.data, -event.arg)
@handle("escape", "a")
def _prev_sentence(event: E) -> None:
" Previous sentence. "
# TODO:
@handle("escape", "e")
def _end_of_sentence(event: E) -> None:
" Move to end of sentence. "
# TODO:
@handle("escape", "t", filter=insert_mode)
def _swap_characters(event: E) -> None:
"""
Swap the last two words before the cursor.
"""
# TODO
@handle("escape", "*", filter=insert_mode)
def _insert_all_completions(event: E) -> None:
"""
`meta-*`: Insert all possible completions of the preceding text.
"""
buff = event.current_buffer
# List all completions.
complete_event = CompleteEvent(text_inserted=False, completion_requested=True)
completions = list(
buff.completer.get_completions(buff.document, complete_event)
)
# Insert them.
text_to_insert = " ".join(c.text for c in completions)
buff.insert_text(text_to_insert)
@handle("c-x", "c-x")
def _toggle_start_end(event: E) -> None:
"""
Move cursor back and forth between the start and end of the current
line.
"""
buffer = event.current_buffer
if buffer.document.is_cursor_at_the_end_of_line:
buffer.cursor_position += buffer.document.get_start_of_line_position(
after_whitespace=False
)
else:
buffer.cursor_position += buffer.document.get_end_of_line_position()
@handle("c-@") # Control-space or Control-@
def _start_selection(event: E) -> None:
"""
Start of the selection (if the current buffer is not empty).
"""
# Take the current cursor position as the start of this selection.
buff = event.current_buffer
if buff.text:
buff.start_selection(selection_type=SelectionType.CHARACTERS)
@handle("c-g", filter=~has_selection)
def _cancel(event: E) -> None:
"""
Control + G: Cancel completion menu and validation state.
"""
event.current_buffer.complete_state = None
event.current_buffer.validation_error = None
@handle("c-g", filter=has_selection)
def _cancel_selection(event: E) -> None:
"""
Cancel selection.
"""
event.current_buffer.exit_selection()
@handle("c-w", filter=has_selection)
@handle("c-x", "r", "k", filter=has_selection)
def _cut(event: E) -> None:
"""
Cut selected text.
"""
data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(data)
@handle("escape", "w", filter=has_selection)
def _copy(event: E) -> None:
"""
Copy selected text.
"""
data = event.current_buffer.copy_selection()
event.app.clipboard.set_data(data)
@handle("escape", "left")
def _start_of_word(event: E) -> None:
"""
Cursor to start of previous word.
"""
buffer = event.current_buffer
buffer.cursor_position += (
buffer.document.find_previous_word_beginning(count=event.arg) or 0
)
@handle("escape", "right")
def _start_next_word(event: E) -> None:
"""
Cursor to start of next word.
"""
buffer = event.current_buffer
buffer.cursor_position += (
buffer.document.find_next_word_beginning(count=event.arg)
or buffer.document.get_end_of_document_position()
)
@handle("escape", "/", filter=insert_mode)
def _complete(event: E) -> None:
"""
M-/: Complete.
"""
b = event.current_buffer
if b.complete_state:
b.complete_next()
else:
b.start_completion(select_first=True)
@handle("c-c", ">", filter=has_selection)
def _indent(event: E) -> None:
"""
Indent selected text.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_start_of_line_position(
after_whitespace=True
)
from_, to = buffer.document.selection_range()
from_, _ = buffer.document.translate_index_to_position(from_)
to, _ = buffer.document.translate_index_to_position(to)
indent(buffer, from_, to + 1, count=event.arg)
@handle("c-c", "<", filter=has_selection)
def _unindent(event: E) -> None:
"""
Unindent selected text.
"""
buffer = event.current_buffer
from_, to = buffer.document.selection_range()
from_, _ = buffer.document.translate_index_to_position(from_)
to, _ = buffer.document.translate_index_to_position(to)
unindent(buffer, from_, to + 1, count=event.arg)
return ConditionalKeyBindings(key_bindings, emacs_mode)
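# A hypothetical, minimal illustration of the binding pattern used throughout
# load_emacs_bindings above: `KeyBindings.add` returns a decorator, so a
# handler can be registered either by decorating a function or by calling the
# decorator on a named command. The Ctrl-T binding and the "~" marker are
# made-up examples, not part of prompt_toolkit's defaults.
def _binding_pattern_demo() -> KeyBindingsBase:
    kb = KeyBindings()

    @kb.add("c-t", filter=emacs_insert_mode)
    def _(event: E) -> None:
        # Insert a marker character (illustration only).
        event.current_buffer.insert_text("~")

    # Registering an existing named command under an extra key sequence.
    kb.add("c-x", "c-t")(get_by_name("transpose-chars"))
    return ConditionalKeyBindings(kb, emacs_mode)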
def load_emacs_search_bindings() -> KeyBindingsBase:
key_bindings = KeyBindings()
handle = key_bindings.add
from . import search
# NOTE: We don't bind 'Escape' to 'abort_search'. The reason is that we
# want Alt+Enter to accept input directly in incremental search mode.
# Instead, we have double escape.
handle("c-r")(search.start_reverse_incremental_search)
handle("c-s")(search.start_forward_incremental_search)
handle("c-c")(search.abort_search)
handle("c-g")(search.abort_search)
handle("c-r")(search.reverse_incremental_search)
handle("c-s")(search.forward_incremental_search)
handle("up")(search.reverse_incremental_search)
handle("down")(search.forward_incremental_search)
handle("enter")(search.accept_search)
# Handling of escape.
handle("escape", eager=True)(search.accept_search)
# Like Readline, it's more natural to accept the search when escape has
# been pressed; however, the following two bindings could be used
# instead.
# #handle('escape', 'escape', eager=True)(search.abort_search)
# #handle('escape', 'enter', eager=True)(search.accept_search_and_accept_input)
# If Read-only: also include the following key bindings:
# '/' and '?' key bindings for searching, just like Vi mode.
handle("?", filter=is_read_only & ~vi_search_direction_reversed)(
search.start_reverse_incremental_search
)
handle("/", filter=is_read_only & ~vi_search_direction_reversed)(
search.start_forward_incremental_search
)
handle("?", filter=is_read_only & vi_search_direction_reversed)(
search.start_forward_incremental_search
)
handle("/", filter=is_read_only & vi_search_direction_reversed)(
search.start_reverse_incremental_search
)
@handle("n", filter=is_read_only)
def _jump_next(event: E) -> None:
" Jump to next match. "
event.current_buffer.apply_search(
event.app.current_search_state,
include_current_position=False,
count=event.arg,
)
@handle("N", filter=is_read_only)
def _jump_prev(event: E) -> None:
" Jump to previous match. "
event.current_buffer.apply_search(
~event.app.current_search_state,
include_current_position=False,
count=event.arg,
)
return ConditionalKeyBindings(key_bindings, emacs_mode)
def load_emacs_shift_selection_bindings() -> KeyBindingsBase:
"""
Bindings to select text with shift + cursor movements
"""
key_bindings = KeyBindings()
handle = key_bindings.add
def unshift_move(event: E) -> None:
"""
Used for the shift selection mode. When called with
a shift + movement key press event, moves the cursor
as if shift is not pressed.
"""
key = event.key_sequence[0].key
if key == Keys.ShiftUp:
event.current_buffer.auto_up(count=event.arg)
return
if key == Keys.ShiftDown:
event.current_buffer.auto_down(count=event.arg)
return
# the other keys are handled through their readline command
key_to_command: Dict[Union[Keys, str], str] = {
Keys.ShiftLeft: "backward-char",
Keys.ShiftRight: "forward-char",
Keys.ShiftHome: "beginning-of-line",
Keys.ShiftEnd: "end-of-line",
Keys.ControlShiftLeft: "backward-word",
Keys.ControlShiftRight: "forward-word",
Keys.ControlShiftHome: "beginning-of-buffer",
Keys.ControlShiftEnd: "end-of-buffer",
}
try:
# Both the dict lookup and `get_by_name` can raise KeyError.
handler = get_by_name(key_to_command[key])
except KeyError:
pass
else: # (`else` is not really needed here.)
if not isinstance(handler, Binding):
# (It should always be a normal callable here, for these
# commands.)
handler(event)
@handle("s-left", filter=~has_selection)
@handle("s-right", filter=~has_selection)
@handle("s-up", filter=~has_selection)
@handle("s-down", filter=~has_selection)
@handle("s-home", filter=~has_selection)
@handle("s-end", filter=~has_selection)
@handle("c-s-left", filter=~has_selection)
@handle("c-s-right", filter=~has_selection)
@handle("c-s-home", filter=~has_selection)
@handle("c-s-end", filter=~has_selection)
def _start_selection(event: E) -> None:
"""
Start selection with shift + movement.
"""
# Take the current cursor position as the start of this selection.
buff = event.current_buffer
if buff.text:
buff.start_selection(selection_type=SelectionType.CHARACTERS)
if buff.selection_state is not None:
# (`selection_state` should never be `None`, it is created by
# `start_selection`.)
buff.selection_state.enter_shift_mode()
# Then move the cursor
original_position = buff.cursor_position
unshift_move(event)
if buff.cursor_position == original_position:
# Cursor didn't actually move - so cancel selection
# to avoid having an empty selection
buff.exit_selection()
@handle("s-left", filter=shift_selection_mode)
@handle("s-right", filter=shift_selection_mode)
@handle("s-up", filter=shift_selection_mode)
@handle("s-down", filter=shift_selection_mode)
@handle("s-home", filter=shift_selection_mode)
@handle("s-end", filter=shift_selection_mode)
@handle("c-s-left", filter=shift_selection_mode)
@handle("c-s-right", filter=shift_selection_mode)
@handle("c-s-home", filter=shift_selection_mode)
@handle("c-s-end", filter=shift_selection_mode)
def _extend_selection(event: E) -> None:
"""
Extend the selection
"""
# Just move the cursor, like shift was not pressed
unshift_move(event)
buff = event.current_buffer
if buff.selection_state is not None:
if buff.cursor_position == buff.selection_state.original_cursor_position:
# selection is now empty, so cancel selection
buff.exit_selection()
@handle(Keys.Any, filter=shift_selection_mode)
def _replace_selection(event: E) -> None:
"""
Replace selection by what is typed
"""
event.current_buffer.cut_selection()
get_by_name("self-insert").call(event)
@handle("enter", filter=shift_selection_mode & is_multiline)
def _newline(event: E) -> None:
"""
A newline replaces the selection
"""
event.current_buffer.cut_selection()
event.current_buffer.newline(copy_margin=not in_paste_mode())
@handle("backspace", filter=shift_selection_mode)
def _delete(event: E) -> None:
"""
Delete selection.
"""
event.current_buffer.cut_selection()
@handle("c-y", filter=shift_selection_mode)
def _yank(event: E) -> None:
"""
In shift selection mode, yanking (pasting) replaces the selection.
"""
buff = event.current_buffer
if buff.selection_state:
buff.cut_selection()
get_by_name("yank").call(event)
# moving the cursor in shift selection mode cancels the selection
@handle("left", filter=shift_selection_mode)
@handle("right", filter=shift_selection_mode)
@handle("up", filter=shift_selection_mode)
@handle("down", filter=shift_selection_mode)
@handle("home", filter=shift_selection_mode)
@handle("end", filter=shift_selection_mode)
@handle("c-left", filter=shift_selection_mode)
@handle("c-right", filter=shift_selection_mode)
@handle("c-home", filter=shift_selection_mode)
@handle("c-end", filter=shift_selection_mode)
def _cancel(event: E) -> None:
"""
Cancel selection.
"""
event.current_buffer.exit_selection()
# we then process the cursor movement
key_press = event.key_sequence[0]
event.key_processor.feed(key_press, first=True)
return ConditionalKeyBindings(key_bindings, emacs_mode)
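# A hypothetical sketch of combining the three loaders above by hand. In
# normal use prompt_toolkit assembles these bindings itself via its default
# key-binding loader, so this is illustration only; the session created here
# is a made-up example.
def _demo_emacs_session():
    from prompt_toolkit.shortcuts import PromptSession
    from prompt_toolkit.key_binding import merge_key_bindings

    bindings = merge_key_bindings(
        [
            load_emacs_bindings(),
            load_emacs_search_bindings(),
            load_emacs_shift_selection_bindings(),
        ]
    )
    # The merged bindings are only active in emacs mode, because each loader
    # wraps its bindings in ConditionalKeyBindings(key_bindings, emacs_mode).
    return PromptSession(key_bindings=bindings)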
|
|
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.abstract_plotting_library nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
#===============================================================================
# Make sure that the necessary files and functions are
# defined in the plotting library:
class AbstractPlottingLibrary(object):
def __init__(self):
"""
Set the defaults dictionary in the _defaults variable:
E.g. for matplotlib we define a file defaults.py and
set the dictionary of it here:
from . import defaults
_defaults = defaults.__dict__
"""
self._defaults = {}
self.__defaults = None
@property
def defaults(self):
#===============================================================================
if self.__defaults is None:
from collections import defaultdict
class defaultdict(defaultdict):
def __getattr__(self, *args, **kwargs):
return defaultdict.__getitem__(self, *args, **kwargs)
self.__defaults = defaultdict(dict, self._defaults)
return self.__defaults
#===============================================================================
def figure(self, nrows, ncols, **kwargs):
"""
Get a new figure with nrows and ncols subplots.
Does not initialize the canvases yet.
There are individual kwargs for the individual plotting libraries to use.
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def new_canvas(self, figure=None, col=1, row=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
"""
Return a (canvas, kwargs) pair for your plotting library.
If figure is not None, create a canvas in the figure
at subplot position (col, row).
This method does two things: it creates an empty canvas
and updates the kwargs (deletes the unnecessary kwargs)
for further usage in normal plotting.
E.g. in matplotlib this means it deletes references to ax, as
plotting is done on the axis itself and is not a kwarg.
The kwargs are plotting library specific kwargs!
:param {'2d'|'3d'} projection: The projection to use.
:param xlabel: the label to put on the xaxis
:param ylabel: the label to put on the yaxis
:param zlabel: the label to put on the zaxis (if plotting in 3d)
:param title: the title of the plot
:param legend: if True, plot a legend, if int make legend rows in the legend
:param (float, float) xlim: the limits for the xaxis
:param (float, float) ylim: the limits for the yaxis
:param (float, float) zlim: the limits for the zaxis (if plotting in 3d)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def add_to_canvas(self, canvas, plots, legend=True, title=None, **kwargs):
"""
Add plots to the canvas; plots is a dictionary with the plots as its
items, or a list of plots.
The kwargs are plotting library specific kwargs!
E.g. in matplotlib this does not have to do anything to add stuff, but
we set the legend and title.
!This function returns the updated canvas!
:param title: the title of the plot
:param legend: whether to plot a legend or not
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def show_canvas(self, canvas, **kwargs):
"""
Draw/Plot the canvas given.
"""
raise NotImplementedError
def plot(self, canvas, X, Y, Z=None, color=None, label=None, **kwargs):
"""
Make a line plot for Y on X (Y = f(X)) on the canvas.
If Z is not None, plot in 3d!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def plot_axis_lines(self, ax, X, color=None, label=None, **kwargs):
"""
Plot lines at the bottom (lower boundary of yaxis) of the axis at input location X.
If X is two dimensional, plot in 3d and connect the axis lines to the bottom of the Z axis.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def surface(self, canvas, X, Y, Z, color=None, label=None, **kwargs):
"""
Plot a surface for 3d plotting for the inputs (X, Y, Z).
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def scatter(self, canvas, X, Y, Z=None, color=None, vmin=None, vmax=None, label=None, **kwargs):
"""
Make a scatter plot between X and Y on the canvas given.
the kwargs are plotting library specific kwargs!
:param canvas: the plotting library's specific canvas to plot on.
:param array-like X: the inputs to plot.
:param array-like Y: the outputs to plot.
:param array-like Z: the Z level to plot (if plotting 3d).
:param array-like color: the color level for each point.
:param float vmin: minimum colorscale
:param float vmax: maximum colorscale
:param kwargs: the specific kwargs for your plotting library
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def barplot(self, canvas, x, height, width=0.8, bottom=0, color=None, label=None, **kwargs):
"""
Plot vertical bar plot centered at x with height
and width of bars. The y level is at bottom.
the kwargs are plotting library specific kwargs!
:param array-like x: the center points of the bars
:param array-like height: the height of the bars
:param array-like width: the width of the bars
:param array-like bottom: the start y level of the bars
:param kwargs: kwargs for the specific library you are using.
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make an errorbar along the xaxis for points at (X,Y) on the canvas.
if error is two dimensional, the lower error is error[:,0] and
the upper error is error[:,1]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def yerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs):
"""
Make errorbars along the yaxis on the canvas given.
if error is two dimensional, the lower error is error[0, :] and
the upper error is error[1, :]
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def imshow(self, canvas, X, extent=None, label=None, vmin=None, vmax=None, **kwargs):
"""
Show the image stored in X on the canvas.
The origin of the image show is (0,0), such that X[0,0] gets plotted at [0,0] of the image!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def imshow_interact(self, canvas, plot_function, extent=None, label=None, vmin=None, vmax=None, **kwargs):
"""
This function is optional!
Create an imshow controller to stream
the image returned by the plot_function. There is an imshow controller written for
matplotlib, which updates the imshow on changes in axis.
The origin of the image show is (0,0), such that X[0,0] gets plotted at [0,0] of the image!
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def annotation_heatmap(self, canvas, X, annotation, extent, label=None, **kwargs):
"""
Plot an annotation heatmap. That is like an imshow, but
put the text of the annotation inside the cells of the heatmap (centered).
:param canvas: the canvas to plot on
:param array-like annotation: the annotation labels for the heatmap
:param [horizontal_min,horizontal_max,vertical_min,vertical_max] extent: the extent of where to place the heatmap
:param str label: the label for the heatmap
:return: a list of both the heatmap and annotation plots [heatmap, annotation], or the interactive update object (alone)
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def annotation_heatmap_interact(self, canvas, plot_function, extent, label=None, resolution=15, **kwargs):
"""
if plot_function is not None, return an interactive updated
heatmap, which updates on axis events, so that one can zoom in
and out and the heatmap gets updated. See the matplotlib implementation
in matplot_dep.controllers.
the plot_function returns a pair (X, annotation) to plot, when called with
a new input X (which would be the grid, which is visible on the plot
right now)
:param canvas: the canvas to plot on
:param array-like annotation: the annotation labels for the heatmap
:param [horizontal_min,horizontal_max,vertical_min,vertical_max] extent: the extent of where to place the heatmap
:param str label: the label for the heatmap
:return: a list of both the heatmap and annotation plots [heatmap, annotation], or the interactive update object (alone)
:param plot_function: the function, which generates new data for given input locations X
:param int resolution: the resolution of the interactive plot redraw - this is only needed when giving a plot_function
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def contour(self, canvas, X, Y, C, Z=None, color=None, label=None, **kwargs):
"""
Make a contour plot at (X, Y) with heights/colors stored in C on the canvas.
if Z is not None: make 3d contour plot at (X, Y, Z) with heights/colors stored in C on the canvas.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def fill_between(self, canvas, X, lower, upper, color=None, label=None, **kwargs):
"""
Fill along the xaxis between lower and upper.
the kwargs are plotting library specific kwargs!
"""
raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library")
def fill_gradient(self, canvas, X, percentiles, color=None, label=None, **kwargs):
"""
Plot a gradient (in alpha values) for the given percentiles.
the kwargs are plotting library specific kwargs!
"""
print("fill_gradient not implemented in this backend.")
|
|
"""
Tests for the backend objects. We instantiate local copies of
the backends and invoke the entry points for the protocol methods.
We do not set up any server processes or communicate over sockets.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import glob
import unittest
import pysam
import ga4gh.exceptions as exceptions
import ga4gh.backend as backend
import ga4gh.protocol as protocol
import ga4gh.datamodel.references as references
import ga4gh.datamodel.variants as variants
class BackendForTesting(backend.AbstractBackend):
"""
A backend to test abstract methods
"""
class TestAbstractBackend(unittest.TestCase):
"""
Provides testing harness for testing methods in AbstractBackend,
using an instance of the mock SimulatedBackend object.
"""
def setUp(self):
self._backend = backend.SimulatedBackend(
numCalls=100, numVariantSets=10)
# TODO arbitrary values, pepper to taste
def resultIterator(
self, request, pageSize, searchMethod, ResponseClass, listMember):
"""
Returns an iterator over the list of results from the specified
request. All results are returned, and paging is handled
automatically.
"""
notDone = True
request.pageSize = pageSize
while notDone:
# TODO validate the response there.
responseStr = searchMethod(request.toJsonString())
response = ResponseClass.fromJsonString(responseStr)
objectList = getattr(response, listMember)
self.assertLessEqual(len(objectList), pageSize)
for obj in objectList:
yield obj
notDone = response.nextPageToken is not None
request.pageToken = response.nextPageToken
def getVariantSets(self, pageSize=100):
"""
Returns an iterator over the variantSets, abstracting away
the details of the pageSize.
"""
request = protocol.SearchVariantSetsRequest()
request.datasetId = self._backend.getDatasetIds()[0]
return self.resultIterator(
request, pageSize, self._backend.runSearchVariantSets,
protocol.SearchVariantSetsResponse, "variantSets")
def getVariants(
self, variantSetIds, referenceName, start=0, end=2 ** 32,
pageSize=100, callSetIds=None):
"""
Returns an iterator over the specified list of variants,
abstracting out paging details.
"""
request = protocol.SearchVariantsRequest()
request.variantSetIds = variantSetIds
request.referenceName = referenceName
request.start = start
request.end = end
request.callSetIds = callSetIds
return self.resultIterator(
request, pageSize, self._backend.runSearchVariants,
protocol.SearchVariantsResponse, "variants")
def getCallSets(self, variantSetId, pageSize=100):
"""
Returns an iterator over the callsets in a specified
variant set.
"""
request = protocol.SearchCallSetsRequest()
request.variantSetIds = [variantSetId]
return self.resultIterator(
request, pageSize, self._backend.runSearchCallSets,
protocol.SearchCallSetsResponse, "callSets")
def testGetVariantSets(self):
datasetId = self._backend.getDatasetIds()[0]
sortedVariantSetsFromGetter = sorted(
self._backend.getDataset(datasetId).getVariantSets())
sortedVariantSetMapValues = sorted(
self._backend.getDataset(datasetId)._variantSetIdMap.values())
self.assertEqual(
sortedVariantSetMapValues, sortedVariantSetsFromGetter)
def testRunSearchRequest(self):
request = protocol.SearchVariantSetsRequest()
request.datasetId = self._backend.getDatasetIds()[0]
responseStr = self._backend.runSearchRequest(
request.toJsonString(), protocol.SearchVariantSetsRequest,
protocol.SearchVariantSetsResponse,
self._backend.variantSetsGenerator)
response = protocol.SearchVariantSetsResponse.fromJsonString(
responseStr)
self.assertTrue(
isinstance(response, protocol.SearchVariantSetsResponse))
def testRunGetRequest(self):
id_ = "anId"
obj = references.SimulatedReferenceSet(id_)
idMap = {id_: obj}
responseStr = self._backend.runGetRequest(idMap, id_)
class_ = protocol.ReferenceSet
response = class_.fromJsonString(responseStr)
self.assertTrue(isinstance(response, class_))
def testRunListReferenceBases(self):
id_ = "referenceSet0:srs0"
self.runListReferenceBases(id_)
def testSearchVariantSets(self):
request = protocol.SearchVariantSetsRequest()
request.datasetId = self._backend.getDatasetIds()[0]
responseStr = self._backend.runSearchVariantSets(
request.toJsonString())
response = protocol.SearchVariantSetsResponse.fromJsonString(
responseStr)
self.assertTrue(
isinstance(response, protocol.SearchVariantSetsResponse))
def testSearchVariants(self):
variantSetIds = [
variantSet.id for variantSet in self.getVariantSets(pageSize=1)]
request = protocol.SearchVariantsRequest()
request.variantSetId = variantSetIds[0]
responseStr = self._backend.runSearchVariants(request.toJsonString())
response = protocol.SearchVariantsResponse.fromJsonString(
responseStr)
self.assertTrue(
isinstance(response, protocol.SearchVariantsResponse))
def testSearchCallSets(self):
variantSetIds = [
variantSet.id for variantSet in self.getVariantSets(pageSize=1)]
request = protocol.SearchCallSetsRequest()
request.variantSetId = variantSetIds[0]
responseStr = self._backend.runSearchCallSets(request.toJsonString())
response = protocol.SearchCallSetsResponse.fromJsonString(
responseStr)
self.assertTrue(
isinstance(response, protocol.SearchCallSetsResponse))
def testVariantSetPagination(self):
results = []
for pageSize in range(1, 100):
variantSetIds = [
variantSet.id for variantSet in self.getVariantSets(
pageSize=pageSize)]
results.append(variantSetIds)
for result in results[1:]:
self.assertEqual(result, results[0])
def runListReferenceBases(self, id_):
requestArgs = {"start": 5, "end": 10, "pageToken": "0"}
responseStr = self._backend.runListReferenceBases(id_, requestArgs)
response = protocol.ListReferenceBasesResponse.fromJsonString(
responseStr)
self.assertTrue(
isinstance(response, protocol.ListReferenceBasesResponse))
class TestFileSystemBackend(TestAbstractBackend):
"""
Tests proper initialization of the filesystem backend using indexed
files in the tests/data directory.
"""
def setUp(self):
self._dataDir = os.path.join("tests", "data")
self._referencesDir = os.path.join(self._dataDir, "references")
self._datasetDir = os.path.join(self._dataDir, "dataset1")
self._variantsDir = os.path.join(self._datasetDir, "variants")
self._vcfs = {}
self._variants = []
self._referenceNames = set()
self._chromFileMap = {}
for relativePath in os.listdir(self._variantsDir):
pathToFiles = os.path.join(self._variantsDir, relativePath)
self._vcfs[relativePath] = []
for vcfFile in glob.glob(os.path.join(
pathToFiles, "*.vcf.gz")):
self._chromFileMap[relativePath] = {}
self._vcfs[relativePath].append(vcfFile)
vcf = pysam.VariantFile(filename=vcfFile)
for chrom in vcf.index:
self._chromFileMap[relativePath][chrom] = vcf
self._backend = backend.FileSystemBackend(self._dataDir)
def testVariantSetIds(self):
variantSets = [variantSet for variantSet in self.getVariantSets()]
self.assertEqual(len(variantSets), len(self._vcfs))
ids = set(variantSet.id for variantSet in variantSets)
datasetId = self._backend.getDatasetIds()[0]
dataset = self._backend.getDataset(datasetId)
vcfKeys = set()
for localId in self._vcfs.keys():
tmpVariantSet = variants.AbstractVariantSet(dataset, localId)
vcfKeys.add(tmpVariantSet.getId())
self.assertEqual(ids, vcfKeys)
def testRunListReferenceBases(self):
id_ = "example_1:simple"
self.runListReferenceBases(id_)
def testDatasetNotFound(self):
request = protocol.SearchVariantSetsRequest()
datasetId = 'doesNotExist'
request.datasetId = datasetId
with self.assertRaises(exceptions.DatasetNotFoundException):
self._backend.getDataset(request.datasetId)
class TestTopLevelObjectGenerator(unittest.TestCase):
"""
Tests the generator used for top level objects
"""
def setUp(self):
class FakeRequest(object):
pass
class FakeTopLevelObject(object):
def toProtocolElement(self):
return self
self.request = FakeRequest()
self.request.pageToken = None
self.idMap = {
"a": FakeTopLevelObject(),
"b": FakeTopLevelObject(),
"c": FakeTopLevelObject(),
}
self.idList = sorted(self.idMap.keys())
self.backend = backend.AbstractBackend()
def testPageToken(self):
self.request.pageToken = "1"
self._assertNumItems(2)
def testPageTokenNone(self):
self._assertNumItems(3)
def _assertNumItems(self, numItems):
iterator = self.backend._topLevelObjectGenerator(
self.request, self.idMap, self.idList)
items = list(iterator)
self.assertEqual(len(items), numItems)
class TestPrivateBackendMethods(unittest.TestCase):
"""
keep tests of private backend methods here and not in one of the
subclasses of TestAbstractBackend, otherwise the tests will needlessly
be run more than once
(they could be put in TestAbstractBackend, but I think it's a clearer
separation to put them in their own test class)
"""
def testParsePageToken(self):
goodPageToken = "12:34:567:8:9000"
parsedToken = backend._parsePageToken(goodPageToken, 5)
self.assertEqual(parsedToken[2], 567)
def testSafeMapQuery(self):
# test map
key = 'a'
value = 'b'
idMap = {key: value}
result = backend._safeMapQuery(idMap, key)
self.assertEqual(result, value)
# test array
arr = [value]
result = backend._safeMapQuery(arr, 0)
self.assertEqual(result, value)
# test exception with custom class
exceptionClass = exceptions.FileOpenFailedException
with self.assertRaises(exceptionClass):
backend._safeMapQuery(idMap, 'notFound', exceptionClass)
# test exception with custom id string
with self.assertRaises(exceptions.ObjectWithIdNotFoundException):
backend._safeMapQuery(idMap, 'notFound', idErrorString='msg')
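# A standalone sketch (not part of the ga4gh test suite) of the paging pattern
# that resultIterator exercises above: keep issuing the same request, carrying
# the nextPageToken forward, until the server stops returning one. The fake
# in-memory "search" below stands in for a backend search method.
def _paged_results_demo(items, pageSize):
    def fakeSearch(pageToken):
        start = int(pageToken) if pageToken is not None else 0
        page = items[start:start + pageSize]
        nextToken = (str(start + pageSize)
                     if start + pageSize < len(items) else None)
        return page, nextToken

    pageToken = None
    results = []
    while True:
        page, pageToken = fakeSearch(pageToken)
        results.extend(page)
        if pageToken is None:
            # _paged_results_demo(list(range(10)), 3) == list(range(10))
            return results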
|
|
"""
A place for code to be called from core C-code.
Some things are more easily handled in Python.
"""
from __future__ import division, absolute_import, print_function
import re
import sys
from numpy.compat import asbytes, basestring
from .multiarray import dtype, array, ndarray
import ctypes
from .numerictypes import object_
if (sys.byteorder == 'little'):
_nbo = asbytes('<')
else:
_nbo = asbytes('>')
def _makenames_list(adict, align):
allfields = []
fnames = list(adict.keys())
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2, 3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names": names,
"formats": formats,
"offsets": offsets,
"titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('', '|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
result.append(('', '|V%d' % num))
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(asbytes(
r'(?P<order1>[<>|=]?)'
r'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
r'(?P<order2>[<>|=]?)'
r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)'))
sep_re = re.compile(asbytes(r'\s*,\s*'))
space_re = re.compile(asbytes(r'\s+$'))
# astr is a string (perhaps comma separated)
_convorder = {asbytes('='): _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError('format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == asbytes(''):
order = order1
elif order1 == asbytes(''):
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError(
'inconsistent byte-order specification %s and %s' %
(order1, order2))
order = order1
if order in [asbytes('|'), asbytes('='), _nbo]:
order = asbytes('')
dtype = order + dtype
if (repeats == asbytes('')):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
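# A hypothetical standalone illustration (not part of this module's public
# API) of the comma-separated format strings that _commastring parses: the
# same strings are accepted by np.dtype, which assigns default field names.
def _commastring_demo():
    import numpy as np
    dt = np.dtype('i4, f8')
    # dt.names == ('f0', 'f1'); dt['f1'] == dtype('float64')
    return dt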
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
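# Illustrative note (not part of the original module): this helper backs the
# public ndarray.ctypes attribute, so user code such as
#   arr.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
# ends up calling the _ctypes.data_as() method defined above.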
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
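# For example, if datatype.names is ('a', 'b', 'c'), then
# _newnames(datatype, 'b') returns ('b', 'a', 'c').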
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError("unknown field name: %s" % (name,))
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
_pep3118_native_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
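# A hedged example of the conversion performed below: on typical platforms,
# _dtype_from_pep3118('T{i:x:d:y:}') produces a structured dtype equivalent to
# a C struct {int x; double y;} -- roughly dtype([('x', 'i4'), ('y', 'f8')])
# with native '@' alignment padding between the fields, giving an itemsize of 16.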
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
fields = {}
offset = 0
explicit_name = False
this_explicit_name = False
common_alignment = 1
is_padding = False
dummy_name_index = [0]
def next_dummy_name():
dummy_name_index[0] += 1
def get_dummy_name():
while True:
name = 'f%d' % dummy_name_index[0]
if name not in fields:
return name
next_dummy_name()
# Parse spec
while spec:
value = None
# End of structure, bail out to upper level
if spec[0] == '}':
spec = spec[1:]
break
# Sub-arrays (1)
shape = None
if spec[0] == '(':
j = spec.index(')')
shape = tuple(map(int, spec[1:j].split(',')))
spec = spec[j+1:]
# Byte order
if spec[0] in ('@', '=', '<', '>', '^', '!'):
byteorder = spec[0]
if byteorder == '!':
byteorder = '>'
spec = spec[1:]
# Byte order characters also control native vs. standard type sizes
if byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize = 1
if spec[0].isdigit():
j = 1
for j in range(1, len(spec)):
if not spec[j].isdigit():
break
itemsize = int(spec[:j])
spec = spec[j:]
# Data types
is_padding = False
if spec[:2] == 'T{':
value, spec, align, next_byteorder = _dtype_from_pep3118(
spec[2:], byteorder=byteorder, is_subdtype=True)
elif spec[0] in type_map_chars:
next_byteorder = byteorder
if spec[0] == 'Z':
j = 2
else:
j = 1
typechar = spec[:j]
spec = spec[j:]
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = (align*common_alignment
/ _gcd(align, common_alignment))
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
this_explicit_name = False
if spec and spec.startswith(':'):
i = spec[1:].index(':') + 1
name = spec[1:i]
spec = spec[i+1:]
explicit_name = True
this_explicit_name = True
else:
name = get_dummy_name()
if not is_padding or this_explicit_name:
if name in fields:
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
fields[name] = (value, offset)
if not this_explicit_name:
next_dummy_name()
byteorder = next_byteorder
offset += value.itemsize
offset += extra_offset
# Check if this was a simple 1-item type
if (len(fields) == 1 and not explicit_name and
fields['f0'][1] == 0 and not is_subdtype):
ret = fields['f0'][0]
else:
ret = dtype(fields)
# Trailing padding must be explicitly added
padding = offset - ret.itemsize
if byteorder == '@':
padding += (-offset) % common_alignment
if is_padding and not this_explicit_name:
ret = _add_trailing_padding(ret, padding)
# Finished
if is_subdtype:
return ret, spec, common_alignment, byteorder
else:
return ret
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
vfields = {'f0': (value, 0)}
else:
vfields = dict(value.fields)
if (value.names and value.names[-1] == '' and
value[''].char == 'V'):
# A trailing padding field is already present
vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
vfields[''][1])
value = dtype(vfields)
else:
# Get a free name for the padding field
j = 0
while True:
name = 'pad%d' % j
if name not in vfields:
vfields[name] = ('V%d' % padding, value.itemsize)
break
j += 1
value = dtype(vfields)
if '' not in vfields:
# Strip out the name of the padding field
names = list(value.names)
names[-1] = ''
value.names = tuple(names)
return value
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
# Exception used in shares_memory()
class TooHardError(RuntimeError):
pass
|
|
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def fake_standard_return():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {}
}
def fake_host():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"api_compat": 1,
"auth": "trusted",
"config": {},
"environment": {
"backing_fs": "ext4",
"driver": "lxc",
"kernel_version": "3.19.0-22-generic",
"lxc_version": "1.1.2",
"lxd_version": "0.12"
}
}
}
def fake_image_list_empty():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": []
}
def fake_image_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": ['/1.0/images/trusty']
}
def fake_image_info():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"aliases": [
{
"target": "ubuntu",
"description": "ubuntu"
}
],
"architecture": 2,
"fingerprint": "04aac4257341478b49c25d22cea8a6ce"
"0489dc6c42d835367945e7596368a37f",
"filename": "",
"properties": {},
"public": 0,
"size": 67043148,
"created_at": 0,
"expires_at": 0,
"uploaded_at": 1435669853
}
}
def fake_alias():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"target": "ubuntu",
"description": "ubuntu"
}
}
def fake_alias_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/images/aliases/ubuntu"
]
}
def fake_container_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/containers/trusty-1"
]
}
def fake_container_state(status):
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"status_code": status
}
}
def fake_container_log():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"log": "fake log"
}
}
def fake_container_migrate():
return {
"type": "async",
"status": "Operation created",
"status_code": 100,
"metadata": {
"id": "dbd9f22c-6da5-4066-8fca-c02f09f76738",
"class": "websocket",
"created_at": "2016-02-07T09:20:53.127321875-05:00",
"updated_at": "2016-02-07T09:20:53.127321875-05:00",
"status": "Running",
"status_code": 103,
"resources": {
"containers": [
"/1.0/containers/instance-00000010"
]
},
"metadata": {
"control": "fake_control",
"fs": "fake_fs"
},
"may_cancel": 'false',
"err": ""
},
"operation": "/1.0/operations/dbd9f22c-6da5-4066-8fca-c02f09f76738"
}
def fake_snapshots_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/containers/trusty-1/snapshots/first"
]
}
def fake_certificate_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/certificates/ABCDEF01"
]
}
def fake_certificate():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"type": "client",
"certificate": "ABCDEF01"
}
}
def fake_profile_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/profiles/fake-profile"
]
}
def fake_profile():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": {
"name": "fake-profile",
"config": {
"resources.memory": "2GB",
"network.0.bridge": "lxcbr0"
}
}
}
def fake_operation_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/operations/1234"
]
}
def fake_operation():
return {
"type": "async",
"status": "OK",
"status_code": 100,
"operation": "/1.0/operation/1234",
"metadata": {
"created_at": "2015-06-09T19:07:24.379615253-06:00",
"updated_at": "2015-06-09T19:07:23.379615253-06:00",
"status": "Running",
"status_code": 103,
"resources": {
"containers": ["/1.0/containers/1"]
},
"metadata": {},
"may_cancel": True
}
}
def fake_operation_info_ok():
return {
"type": "async",
"status": "OK",
"status_code": 200,
"operation": "/1.0/operation/1234",
"metadata": {
"created_at": "2015-06-09T19:07:24.379615253-06:00",
"updated_at": "2015-06-09T19:07:23.379615253-06:00",
"status": "Completed",
"status_code": 200,
"resources": {
"containers": ["/1.0/containers/1"]
},
"metadata": {},
"may_cancel": True
}
}
def fake_operation_info_failed():
return {
"type": "async",
"status": "OK",
"status_code": 200,
"operation": "/1.0/operation/1234",
"metadata": {
"created_at": "2015-06-09T19:07:24.379615253-06:00",
"updated_at": "2015-06-09T19:07:23.379615253-06:00",
"status": "Failure",
"status_code": 400,
"resources": {
"containers": ["/1.0/containers/1"]
},
"metadata": "Invalid container name",
"may_cancel": True
}
}
def fake_network_list():
return {
"type": "sync",
"status": "Success",
"status_code": 200,
"metadata": [
"/1.0/networks/lxcbr0"
]
}
def fake_network():
return {
"type": "async",
"status": "OK",
"status_code": 100,
"operation": "/1.0/operation/1234",
"metadata": {
"name": "lxcbr0",
"type": "bridge",
"members": ["/1.0/containers/trusty-1"]
}
}
def fake_container_config():
return {
'name': "my-container",
'profiles': ["default"],
'architecture': 2,
'config': {"limits.cpus": "3"},
'expanded_config': {"limits.cpus": "3"},
'devices': {
'rootfs': {
'type': "disk",
'path': "/",
'source': "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6"
}
},
'expanded_devices': {
'rootfs': {
'type': "disk",
'path': "/",
'source': "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6"}
},
"eth0": {
"type": "nic",
"parent": "lxcbr0",
"hwaddr": "00:16:3e:f4:e7:1c",
"name": "eth0",
"nictype": "bridged",
}
}
def fake_container_info():
return {
'name': "my-container",
'profiles': ["default"],
'architecture': 2,
'config': {"limits.cpus": "3"},
'expanded_config': {"limits.cpus": "3"},
'devices': {
'rootfs': {
'type': "disk",
'path': "/",
'source': "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6"
}
},
'expanded_devices': {
'rootfs': {
'type': "disk",
'path': "/",
'source': "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6"}
},
"eth0": {
"type": "nic",
"parent": "lxcbr0",
"hwaddr": "00:16:3e:f4:e7:1c",
"name": "eth0",
"nictype": "bridged",
},
'status': {
'status': "Running",
'status_code': 103,
'ips': [{'interface': "eth0",
'protocol': "INET6",
'address': "2001:470:b368:1020:1::2",
'host_veth': "vethGMDIY9"},
{'interface': "eth0",
'protocol': "INET",
'address': "172.16.15.30",
'host_veth': "vethGMDIY9"}]},
}
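# Most of these helpers return canned LXD REST API envelopes
# (type/status/status_code/metadata) intended as mock return values in unit
# tests. A hypothetical example (the client object and method name below are
# illustrative only, not part of this module):
#   with mock.patch.object(client, 'host_info', return_value=fake_host()):
#       assert client.host_info()['metadata']['environment']['driver'] == 'lxc'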
|
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
import sleekxmpp
from optparse import OptionParser
from xml.etree import cElementTree as ET
import os
import time
import sys
import Queue
import thread
class testps(sleekxmpp.ClientXMPP):
def __init__(self, jid, password, ssl=False, plugin_config = {}, plugin_whitelist=[], nodenum=0, pshost=None):
sleekxmpp.ClientXMPP.__init__(self, jid, password, ssl, plugin_config, plugin_whitelist)
self.registerPlugin('xep_0004')
self.registerPlugin('xep_0030')
self.registerPlugin('xep_0060')
self.registerPlugin('xep_0092')
self.add_handler("<message xmlns='jabber:client'><event xmlns='http://jabber.org/protocol/pubsub#event' /></message>", self.pubsubEventHandler, name='Pubsub Event', threaded=True)
self.add_event_handler("session_start", self.start, threaded=True)
self.add_handler("<iq type='error' />", self.handleError, name='Iq Error')
self.events = Queue.Queue()
self.default_config = None
self.ps = self.plugin['xep_0060']
self.node = "pstestnode_%s"
self.pshost = pshost
if pshost is None:
self.pshost = self.server
self.nodenum = int(nodenum)
self.leafnode = self.nodenum + 1
self.collectnode = self.nodenum + 2
self.lasterror = ''
self.sprintchars = 0
self.defaultconfig = None
        self.tests = [
            'test_defaultConfig', 'test_createDefaultNode', 'test_getNodes',
            'test_deleteNode', 'test_createWithConfig', 'test_reconfigureNode',
            'test_subscribeToNode', 'test_addItem', 'test_updateItem',
            'test_deleteItem', 'test_unsubscribeNode', 'test_createCollection',
            'test_subscribeCollection', 'test_addNodeCollection',
            'test_deleteNodeCollection', 'test_addCollectionNode',
            'test_deleteCollectionNode', 'test_unsubscribeNodeCollection',
            'test_deleteCollection']
self.passed = 0
self.width = 120
def start(self, event):
#TODO: make this configurable
self.getRoster()
self.sendPresence(ppriority=20)
thread.start_new(self.test_all, tuple())
def sprint(self, msg, end=False, color=False):
length = len(msg)
if color:
if color == "red":
color = "1;31"
elif color == "green":
color = "0;32"
msg = "%s%s%s" % ("\033[%sm" % color, msg, "\033[0m")
if not end:
sys.stdout.write(msg)
self.sprintchars += length
else:
self.sprint("%s%s" % ("." * (self.width - self.sprintchars - length), msg))
print('')
self.sprintchars = 0
sys.stdout.flush()
def pubsubEventHandler(self, xml):
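        # The Clark-notation ({namespace}tag) paths below pull item/retract ids
        # and collection associate/disassociate node names out of incoming
        # pubsub#event messages and queue them for the running tests to consume.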
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}items/{http://jabber.org/protocol/pubsub#event}item'):
self.events.put(item.get('id', '__unknown__'))
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}items/{http://jabber.org/protocol/pubsub#event}retract'):
self.events.put(item.get('id', '__unknown__'))
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}collection/{http://jabber.org/protocol/pubsub#event}disassociate'):
self.events.put(item.get('node', '__unknown__'))
for item in xml.findall('{http://jabber.org/protocol/pubsub#event}event/{http://jabber.org/protocol/pubsub#event}collection/{http://jabber.org/protocol/pubsub#event}associate'):
self.events.put(item.get('node', '__unknown__'))
def handleError(self, xml):
error = xml.find('{jabber:client}error')
self.lasterror = error.getchildren()[0].tag.split('}')[-1]
def test_all(self):
print("Running Publish-Subscribe Tests")
version = self.plugin['xep_0092'].getVersion(self.pshost)
if version:
print("%s %s on %s" % (version.get('name', 'Unknown Server'), version.get('version', 'v?'), version.get('os', 'Unknown OS')))
print("=" * self.width)
for test in self.tests:
testfunc = getattr(self, test)
self.sprint("%s" % testfunc.__doc__)
if testfunc():
self.sprint("Passed", True, "green")
self.passed += 1
else:
if not self.lasterror:
self.lasterror = 'No response'
self.sprint("Failed (%s)" % self.lasterror, True, "red")
self.lasterror = ''
print("=" * self.width)
self.sprint("Cleaning up...")
#self.ps.deleteNode(self.pshost, self.node % self.nodenum)
self.ps.deleteNode(self.pshost, self.node % self.leafnode)
#self.ps.deleteNode(self.pshost, self.node % self.collectnode)
self.sprint("Done", True, "green")
self.disconnect()
self.sprint("%s" % self.passed, False, "green")
self.sprint("/%s Passed -- " % len(self.tests))
if len(self.tests) - self.passed:
self.sprint("%s" % (len(self.tests) - self.passed), False, "red")
else:
self.sprint("%s" % (len(self.tests) - self.passed), False, "green")
self.sprint(" Failed Tests")
        print('')
#print "%s/%s Passed -- %s Failed Tests" % (self.passed, len(self.tests), len(self.tests) - self.passed)
def test_defaultConfig(self):
"Retreiving default configuration"
result = self.ps.getNodeConfig(self.pshost)
if result is False or result is None:
return False
else:
self.defaultconfig = result
try:
self.defaultconfig.field['pubsub#access_model'].setValue('open')
except KeyError:
pass
try:
self.defaultconfig.field['pubsub#notify_retract'].setValue(True)
except KeyError:
pass
return True
def test_createDefaultNode(self):
"Creating default node"
return self.ps.create_node(self.pshost, self.node % self.nodenum)
def test_getNodes(self):
"Getting list of nodes"
self.ps.getNodes(self.pshost)
self.ps.getItems(self.pshost, 'blog')
return True
def test_deleteNode(self):
"Deleting node"
return self.ps.deleteNode(self.pshost, self.node % self.nodenum)
def test_createWithConfig(self):
"Creating node with config"
if self.defaultconfig is None:
self.lasterror = "No Avail Config"
return False
return self.ps.create_node(self.pshost, self.node % self.leafnode, self.defaultconfig)
def test_reconfigureNode(self):
"Retrieving node config and reconfiguring"
nconfig = self.ps.getNodeConfig(self.pshost, self.node % self.leafnode)
if nconfig == False:
return False
return self.ps.setNodeConfig(self.pshost, self.node % self.leafnode, nconfig)
def test_subscribeToNode(self):
"Subscribing to node"
return self.ps.subscribe(self.pshost, self.node % self.leafnode)
def test_addItem(self):
"Adding item, waiting for notification"
item = ET.Element('test')
result = self.ps.setItem(self.pshost, self.node % self.leafnode, (('test_node1', item),))
if result == False:
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
return False
if event == 'test_node1':
return True
return False
def test_updateItem(self):
"Updating item, waiting for notification"
item = ET.Element('test')
item.attrib['crap'] = 'yup, right here'
result = self.ps.setItem(self.pshost, self.node % self.leafnode, (('test_node1', item),))
if result == False:
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
return False
if event == 'test_node1':
return True
return False
def test_deleteItem(self):
"Deleting item, waiting for notification"
result = self.ps.deleteItem(self.pshost, self.node % self.leafnode, 'test_node1')
if result == False:
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == 'test_node1':
return True
return False
def test_unsubscribeNode(self):
"Unsubscribing from node"
return self.ps.unsubscribe(self.pshost, self.node % self.leafnode)
def test_createCollection(self):
"Creating collection node"
return self.ps.create_node(self.pshost, self.node % self.collectnode, self.defaultconfig, True)
def test_subscribeCollection(self):
"Subscribing to collection node"
return self.ps.subscribe(self.pshost, self.node % self.collectnode)
def test_addNodeCollection(self):
"Assigning node to collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.leafnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#collection'].setValue(self.node % self.collectnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#collection', value=self.node % self.collectnode)
if not self.ps.setNodeConfig(self.pshost, self.node % self.leafnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_deleteNodeCollection(self):
"Removing node assignment to collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.leafnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#collection'].delValue(self.node % self.collectnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#collection', value='')
if not self.ps.setNodeConfig(self.pshost, self.node % self.leafnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_addCollectionNode(self):
"Assigning node from collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.collectnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#children'].setValue(self.node % self.leafnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#children', value=self.node % self.leafnode)
if not self.ps.setNodeConfig(self.pshost, self.node % self.collectnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_deleteCollectionNode(self):
"Removing node from collection, waiting for notification"
config = self.ps.getNodeConfig(self.pshost, self.node % self.collectnode)
if not config or config is None:
self.lasterror = "Config Error"
return False
try:
config.field['pubsub#children'].delValue(self.node % self.leafnode)
except KeyError:
self.sprint("...Missing Field...", False, "red")
config.addField('pubsub#children', value='')
if not self.ps.setNodeConfig(self.pshost, self.node % self.collectnode, config):
return False
try:
event = self.events.get(True, 10)
except Queue.Empty:
self.lasterror = "No Notification"
return False
if event == self.node % self.leafnode:
return True
return False
def test_unsubscribeNodeCollection(self):
"Unsubscribing from collection"
return self.ps.unsubscribe(self.pshost, self.node % self.collectnode)
def test_deleteCollection(self):
"Deleting collection"
return self.ps.deleteNode(self.pshost, self.node % self.collectnode)
if __name__ == '__main__':
    # Parse command line arguments
optp = OptionParser()
optp.add_option('-q','--quiet', help='set logging to ERROR', action='store_const', dest='loglevel', const=logging.ERROR, default=logging.INFO)
optp.add_option('-d','--debug', help='set logging to DEBUG', action='store_const', dest='loglevel', const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v','--verbose', help='set logging to COMM', action='store_const', dest='loglevel', const=5, default=logging.INFO)
optp.add_option("-c","--config", dest="configfile", default="config.xml", help="set config file to use")
optp.add_option("-n","--nodenum", dest="nodenum", default="1", help="set node number to use")
optp.add_option("-p","--pubsub", dest="pubsub", default="1", help="set pubsub host to use")
opts,args = optp.parse_args()
logging.basicConfig(level=opts.loglevel, format='%(levelname)-8s %(message)s')
#load xml config
logging.info("Loading config file: %s" % opts.configfile)
config = ET.parse(os.path.expanduser(opts.configfile)).find('auth')
#init
logging.info("Logging in as %s" % config.attrib['jid'])
plugin_config = {}
plugin_config['xep_0092'] = {'name': 'SleekXMPP Example', 'version': '0.1-dev'}
plugin_config['xep_0199'] = {'keepalive': True, 'timeout': 30, 'frequency': 300}
con = testps(config.attrib['jid'], config.attrib['pass'], plugin_config=plugin_config, plugin_whitelist=[], nodenum=opts.nodenum, pshost=opts.pubsub)
if not config.get('server', None):
# we don't know the server, but the lib can probably figure it out
con.connect()
else:
con.connect((config.attrib['server'], 5222))
con.process(threaded=False)
print("")
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020, XESS Corp. The MIT License (MIT).
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import math
import operator
from builtins import dict, int, str, super
from collections import Counter, namedtuple
from copy import copy
import IPython.display as DISP
import matplotlib.pyplot as plt
import pandas as pd
from future import standard_library
from tabulate import tabulate
standard_library.install_aliases()
# Waveform samples consist of a time and a value.
Sample = namedtuple("Sample", "time value")
class Trace(list):
"""
Trace objects are lists that store a sequence of samples. The samples
should be arranged in order of ascending sample time.
"""
unit_time = 1 # Time interval for a single tick-mark span.
# Default matplotlib settings for a Trace.
# line_fmt (string): [marker][line][color] https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
# line2D (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
# name_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
# data_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
line_fmt = "-C0" # solid, blue line.
name_fmt = {}
data_fmt = {}
slope = 0.20 # trace transition slope as % of unit_time.
trace_fields = ["line_fmt", "name_fmt", "data_fmt", "slope"]
def __init__(self, *args, **kwargs):
self.config(**kwargs)
super().__init__(*args)
self.name = None
self.num_bits = 0
@classmethod
def config_defaults(cls, **kwargs):
"""
Set default configuration for all Traces.
Keyword Args:
line_fmt (string): [marker][line][color] https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
line2D (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
name_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
data_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
"""
for k, v in kwargs.items():
if k not in cls.trace_fields:
continue
setattr(cls, k, copy(v))
for k in cls.trace_fields:
kwargs.pop(k, None) # Remove the keyword arg.
def config(self, **kwargs):
"""
Set configuration for a particular Trace.
Keyword Args:
line_fmt (string): [marker][line][color] https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
line2D (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
name_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
data_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
"""
for k, v in kwargs.items():
if k not in self.trace_fields:
continue
if isinstance(v, dict):
setattr(self, k, copy(getattr(self, k, {})))
try:
getattr(self, k).update(v)
except AttributeError:
setattr(self, k, {})
getattr(self, k).update(v)
else:
setattr(self, k, copy(v))
for k in self.trace_fields:
kwargs.pop(k, None) # Remove the keyword arg.
def store_sample(self, value, time):
"""Store a value and the current time into the trace."""
self.append(Sample(time, copy(value)))
def insert_sample(self, sample):
"""Insert a sample into the correct position within a trace"""
index = self.get_index(sample.time)
self.insert(index, sample)
def start_time(self):
"""Return the time of the first sample in the trace."""
return self[0].time
def stop_time(self):
"""Return the time of the last sample in the trace."""
return self[-1].time
def get_index(self, time):
"""Return the position to insert a sample with the given time."""
for i, sample in enumerate(self):
# Return the index of the 1st sample with a time GREATER than the
# given time because the sample will be inserted BEFORE that index.
if sample.time > time:
return i
# Didn't find a sample with a time greater than the given time, so
# the insertion point is the end of the trace list.
return len(self)
def get_value(self, time):
"""Get the trace value at an arbitrary time."""
# Return the signal value immediately BEFORE the insertion index.
return self[max(0, self.get_index(time) - 1)].value
def get_disp_value(self, time, **kwargs):
"""Get the displayed trace value at an arbitrary time."""
# Get the function for displaying the trace's value, first from kwargs or else from trace data_fmt attr.
data_fmt = kwargs.get("data_fmt", getattr(self, "data_fmt"))
repr = data_fmt.get("repr", str)
val = self.get_value(time)
try:
return repr(val)
except (TypeError, ValueError):
return str(val)
def get_sample_times(self, **kwargs):
"""Return list of times at which the trace was sampled."""
start_time = kwargs.pop("start_time", self.start_time())
stop_time = kwargs.pop("stop_time", self.stop_time())
return [
sample.time for sample in self if start_time <= sample.time <= stop_time
]
def delay(self, delta):
"""Return the trace data shifted in time by delta units."""
delayed_trace = copy(self)
delayed_trace.clear()
delayed_trace.extend([Sample(t + delta, v) for t, v in self])
return delayed_trace
def extend_duration(self, start_time, end_time):
"""Extend the duration of a trace."""
# Extend the trace data to start_time unless the trace data already precedes that.
if start_time < self[0].time:
self.insert(0, Sample(start_time, self[0].value))
# Extend the trace data to end_time unless the trace data already exceeds that.
if end_time > self[-1].time:
self.append(Sample(end_time, self[-1].value))
def collapse_time_repeats(self):
"""Return trace with samples having the same sampling time collapsed into a single sample."""
trace = copy(self)
trace.clear()
# Build the trace backwards, moving from the newest to the oldest sample.
# Accept only samples having a time < the most recently accepted sample.
trace.append(self[-1])
for sample in self[-1::-1]:
if sample.time < trace[0].time:
trace.insert(0, sample)
return trace
def collapse_value_repeats(self):
"""Return trace with consecutive samples having the same value collapsed into a single sample."""
trace = copy(self)
trace.clear()
# Build the trace forwards, removing any samples with the same
# value as the previous sample.
trace.append(self[0])
for sample in self[1:]:
if sample.value != trace[-1].value:
trace.append(sample)
return trace
def interpolate(self, times):
"""Insert interpolated values at the times in the given list."""
for time in times:
            self.insert_sample(Sample(time, self.get_value(time)))
def add_rise_fall(self, delta):
"""Add rise/fall time to trace transitions. Remove repeats before calling!"""
trace = copy(self)
prev_sample = trace[0]
for sample in trace[1:]:
# TODO: This causes a problem if sample.time - delta < prev_sample.time.
trace.insert_sample(Sample(sample.time - delta, prev_sample.value))
prev_sample = sample
return trace
def add_slope(self):
"""Return a trace with slope added to trace transitions."""
slope = max(self.slope, 0.0001) * self.unit_time # Don't let slope go to 0.
return self.add_rise_fall(slope).delay(slope / 2)
def binarize(self):
"""Return trace of sample values set to 1 (if true) or 0 (if false)."""
return Trace([Sample(t, (v and 1) or 0) for t, v in self])
def apply_op1(self, op_func):
"""Return trace generated by applying the operator function to all the sample values in the trace."""
return Trace([Sample(t, op_func(v)) for t, v in self])
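    # Illustration of the two-trace merge performed by apply_op2 below
    # (values are made up): combining Trace([Sample(0, 1), Sample(4, 0)]) with
    # Trace([Sample(2, 1)]) via operator.and_ produces samples at t=0, 2 and 4,
    # i.e. at the union of both traces' sample times, with each result value
    # computed from both traces' values in effect at that instant.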
def apply_op2(self, trc, op_func):
"""Return trace generated by applying the operator function to two traces."""
if isinstance(trc, Trace):
pass
# If the function input is a constant, then make a trace from it.
elif isinstance(trc, (int, float)):
trc = Trace([Sample(0, trc)])
# See if the input object contains a trace.
else:
try:
trc = trc.trace
except AttributeError:
pass
# Abort if operating on something that's not a Trace.
if not isinstance(trc, Trace):
raise Exception(
"Trace can only be combined with another Trace or a number."
)
# Make copies of the traces since they will be altered.
trc1 = copy(self)
trc2 = copy(trc)
# Extend the traces so they start/end at the same time.
start_time = min(trc1[0].time, trc2[0].time)
end_time = max(trc1[-1].time, trc2[-1].time)
trc1.extend_duration(start_time, end_time)
trc2.extend_duration(start_time, end_time)
# Make a trace to hold the result generated by combining the two traces.
res_trc = Trace([])
# Loop through the trace samples, always using the earliest sample of the two.
indx1 = 0 # Index of current sample in trc1.
indx2 = 0 # Index of current sample in trc2.
while True:
# Find the earliest sample of the two traces and get the value
# of each trace at that time.
t1, v1 = trc1[indx1]
t2, v2 = trc2[indx2]
if t1 <= t2:
curr_time = t1
else:
curr_time = t2
v2 = trc2.get_value(curr_time)
v1 = trc1.get_value(curr_time)
# Combine the trace values using the operator.
res_trc_val = op_func(v1, v2)
# Append result to the results trace.
res_trc.append(Sample(curr_time, res_trc_val))
# Looped through all samples if each index is pointing to the
# last sample in their traces.
if indx1 == len(trc1) - 1 and indx2 == len(trc2) - 1:
break
# Move to next sample after the current time. (Might need to
# increment both traces if they both had samples at the current time.)
if t1 == curr_time:
indx1 += 1
if t2 == curr_time:
indx2 += 1
# Return trace containing the result of the operation.
return res_trc
def __eq__(self, trc):
return self.apply_op2(trc, operator.eq).binarize()
def __ne__(self, trc):
return self.apply_op2(trc, operator.ne).binarize()
def __le__(self, trc):
return self.apply_op2(trc, operator.le).binarize()
def __ge__(self, trc):
return self.apply_op2(trc, operator.ge).binarize()
def __lt__(self, trc):
return self.apply_op2(trc, operator.lt).binarize()
def __gt__(self, trc):
return self.apply_op2(trc, operator.gt).binarize()
def __add__(self, trc):
return self.apply_op2(trc, operator.add)
def __sub__(self, trc):
return self.apply_op2(trc, operator.sub)
def __mul__(self, trc):
return self.apply_op2(trc, operator.mul)
def __floordiv__(self, trc):
return self.apply_op2(trc, operator.floordiv)
def __truediv__(self, trc):
return self.apply_op2(trc, operator.truediv)
def __mod__(self, trc):
return self.apply_op2(trc, operator.mod)
def __lshift__(self, trc):
return self.apply_op2(trc, operator.lshift)
def __rshift__(self, trc):
return self.apply_op2(trc, operator.rshift)
def __and__(self, trc):
return self.apply_op2(trc, operator.and_)
def __or__(self, trc):
return self.apply_op2(trc, operator.or_)
def __xor__(self, trc):
return self.apply_op2(trc, operator.xor)
def __pow__(self, trc):
return self.apply_op2(trc, operator.pow)
def __pos__(self):
return self.apply_op1(operator.pos)
def __neg__(self):
return self.apply_op1(operator.neg)
def __not__(self):
return self.apply_op1(operator.not_).binarize()
def __inv__(self):
return self.apply_op1(operator.inv)
def __invert__(self):
return self.apply_op1(operator.invert)
def __abs__(self):
return self.apply_op1(operator.abs)
def anyedge(self):
return (self != self.delay(self.unit_time)).binarize()
def posedge(self):
return (self & (~self.delay(self.unit_time))).binarize()
def negedge(self):
return ((~self) & self.delay(self.unit_time)).binarize()
def trig_times(self):
"""Return list of times trace value is true (non-zero)."""
return [sample.time for sample in self if sample.value]
def to_matplotlib(self, subplot, start_time, stop_time, xlim=None, **kwargs):
"""Fill a matplotlib subplot for a trace between the start & stop times."""
# Copy trace and apply formatting to the copy.
trace = copy(self)
trace.config(**kwargs)
# Set the X axis limits to clip the trace duration to the desired limits.
if not xlim:
xlim = (start_time, stop_time)
subplot.set_xlim(*xlim)
# Set the Y axis limits.
subplot.set_ylim(-0.2, 1.2)
# Set the Y axis label position for each trace name.
ylbl_position = dict(
rotation=0, horizontalalignment="right", verticalalignment="center", x=-0.01
)
subplot.set_ylabel(trace.name, ylbl_position, **trace.name_fmt)
# Remove ticks from Y axis.
subplot.set_yticks([])
subplot.tick_params(axis="y", length=0, which="both")
# Remove the box around the subplot.
subplot.spines["left"].set_visible(False)
subplot.spines["right"].set_visible(False)
subplot.spines["top"].set_visible(False)
subplot.spines["bottom"].set_visible(False)
# Copy the trace while removing any consecutively-repeated values.
trace = trace.collapse_value_repeats()
# Insert samples for beginning/end times into a copy of the trace data.
trace.insert_sample(Sample(start_time, self.get_value(start_time)))
trace.insert_sample(Sample(stop_time, self.get_value(stop_time)))
# Remove repeats of samples having the same sample time.
trace = trace.collapse_time_repeats()
# Extend the trace on both ends to make sure it covers the start/stop interval.
# Count on matplotlib to clip the waveforms.
# trace[0] = Sample(trace[0].time - self.unit_time, trace[0].value)
# trace[-1] = Sample(trace[-1].time + self.unit_time, trace[-1].value)
# Plot the bus or binary trace.
if trace.num_bits > 1:
# Multi-bit bus trace.
# Get the function for displaying the bus value.
repr = trace.data_fmt.get("repr", str)
# Copy data format with repr function removed because matplotlib won't like it.
data_fmt = copy(trace.data_fmt)
data_fmt.pop("repr", None)
# Get list of times the bus changes values.
chg_times = [sample.time for sample in trace]
# Print bus values at midpoints of the bus packets.
time0 = chg_times[0]
for time1 in chg_times[1:]:
if time0 < start_time:
time0 = start_time
if time1 <= time0:
time0 = time1
continue
if time1 > stop_time:
time1 = stop_time
val = trace.get_disp_value(time0, **kwargs)
text_x = (time1 + time0) / 2
text_y = 0.5
subplot.text(
text_x,
text_y,
val,
horizontalalignment="center",
verticalalignment="center",
**data_fmt # Use local data_fmt dict with repr removed.
)
time0 = time1
if time0 >= stop_time:
break
# Create a binary trace that toggles whenever the bus trace changes values.
tgl_trace = copy(trace)
tgl_trace.clear()
value = 0
for time in chg_times:
tgl_trace.store_sample(value, time)
value ^= 1
# Slope the transitions of the toggle waveform.
tgl_trace = tgl_trace.add_slope()
# Create a complementary trace for drawing bus packets.
bar_trace = tgl_trace.__not__()
# Plot the trace packets.
x = [sample.time for sample in tgl_trace]
y = [sample.value for sample in tgl_trace]
y_bar = [sample.value for sample in bar_trace]
if isinstance(trace.line_fmt, dict):
subplot.plot(x, y, x, y_bar, **trace.line_fmt)
else:
subplot.plot(x, y, trace.line_fmt, x, y_bar, trace.line_fmt)
else:
# Binary trace.
trace = trace.add_slope()
x = [sample.time for sample in trace]
y = [sample.value for sample in trace]
if isinstance(trace.line_fmt, dict):
subplot.plot(x, y, **trace.line_fmt)
else:
subplot.plot(x, y, trace.line_fmt)
def to_wavejson(self, start_time, stop_time):
"""Generate the WaveJSON data for a trace between the start & stop times."""
has_samples = False # No samples currently on the wave.
wave_str = "" # No samples, so wave string is empty.
wave_data = list() # No samples, so wave data values are empty.
prev_time = start_time # Set time of previous sample to the wave starting time.
prev_val = None # Value of previous sample starts at non-number.
# Save the current state of the waveform.
prev = [has_samples, wave_str, copy(wave_data), prev_time, prev_val]
# Insert samples into a copy of the waveform data. These samples bound
# the beginning and ending times of the waveform.
bounded_samples = copy(self)
bounded_samples.insert_sample(Sample(start_time, self.get_value(start_time)))
bounded_samples.insert_sample(Sample(stop_time, self.get_value(stop_time)))
# Create the waveform by processing the waveform data.
for time, val in bounded_samples:
# Skip samples before the desired start of the time window.
if time < start_time:
continue
# Exit the loop if the current sample occurred after the time window.
if time > stop_time:
break
# If a sample occurred at the same time as the previous sample,
# then revert back to the previous waveform to remove the previous
# sample and put this new sample in its place.
if time == prev_time:
has_samples, wave_str, wave_data, prev_time, prev_val = prev
# Save the current waveform in case a back-up is needed.
prev = [has_samples, wave_str, copy(wave_data), prev_time, prev_val]
# If the current sample occurred after the desired time window,
# then just extend the previous sample up to the end of the window.
if time > stop_time:
val = prev_val # Use the value of the previous sample.
time = stop_time # Extend it to the end of the window.
# Replicate the sample's previous value up to the current time.
wave_str += "." * (round((time - prev_time) / self.unit_time) - 1)
# Add the current sample's value to the waveform.
if has_samples and (val == prev_val):
# Just extend the previous sample if the current sample has the same value.
wave_str += "."
else:
if self.num_bits > 1:
# Value will be shown in a data "envelope".
wave_str += "="
wave_data.append(str(val))
else:
# Binary (hi/lo) waveform.
wave_str += str(val * 1) # Turn value into '1' or '0'.
has_samples = True # The waveform now contains samples.
prev_time = time # Save the time and value of the
prev_val = val # sample that was just added to the waveform.
# Return a dictionary with the wave in a format that WaveDrom understands.
wave = dict()
wave["name"] = self.name
wave["wave"] = wave_str
if wave_data:
wave["data"] = wave_data
return wave
###############################################################################
# Functions for handling multiple traces follow...
###############################################################################
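# Hedged example for calc_unit_time below: for a trace sampled at times
# [0, 10, 20, 40], the intervals are {10: 2, 20: 1}; the most common interval
# (10) is an integer multiple of the smallest interval (10), so the smallest
# interval, 10, is returned as the unit time.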
def calc_unit_time(*traces):
"""Calculate and return the unit time between trace samples."""
intervals = Counter()
for trace in traces:
times = sorted(
trace.collapse_time_repeats().collapse_value_repeats().get_sample_times()
)
intervals.update([t[1] - t[0] for t in zip(times[:-1], times[1:])])
most_common_interval = intervals.most_common(1)[0][0]
min_interval = min(intervals.keys())
ratio = most_common_interval / min_interval
if math.isclose(round(ratio), ratio, abs_tol=0.01):
return min_interval
raise Exception(
"Unable to determine the unit_time for the set of Traces."
"Manually set it using Peeker.unit_time = <simulation step time>."
)
def _get_sample_times(*traces, **kwargs):
"""Get sample times for all the traces."""
# Set the time boundaries for the DataFrame.
max_stop_time = max(
[trace.stop_time() for trace in traces if isinstance(trace, Trace)]
)
stop_time = kwargs.pop("stop_time", max_stop_time)
min_start_time = min(
[trace.start_time() for trace in traces if isinstance(trace, Trace)]
)
start_time = kwargs.pop("start_time", min_start_time)
# Get all the sample times of all the traces between the start and stop times.
times = set([start_time, stop_time])
for trace in traces:
times.update(
set(trace.get_sample_times(start_time=start_time, stop_time=stop_time))
)
# If requested, fill in additional times between sample times.
step = kwargs.pop("step", 0)
if step:
times.update(set(range(start_time, stop_time + 1, step)))
# Sort sample times in increasing order.
times = sorted(list(times))
return times
def traces_to_dataframe(*traces, **kwargs):
"""
Create Pandas dataframe of sample times and values for a set of traces.
Args:
*traces: A list of traces with samples. Can also contain non-Traces
which will be ignored.
Keywords Args:
start_time: The earliest (left-most) time bound for the traces.
stop_time: The latest (right-most) time bound for the traces.
step: Set the time increment for filling in between sample times.
If 0, then don't fill in between sample times.
Returns:
A Pandas dataframe of sample times and values for a set of traces.
"""
# Extract all the traces and ignore all the non-traces.
traces = [t for t in traces if isinstance(t, Trace)]
# Get sample times.
times = _get_sample_times(*traces, **kwargs)
# Create dict of trace sample lists.
trace_data = {
tr.name: [tr.get_disp_value(t, **kwargs) for t in times] for tr in traces
}
# Return a DataFrame where each column is a trace and time is the index.
return pd.DataFrame(trace_data, index=times)
def traces_to_table_data(*traces, **kwargs):
"""
Create table of sample times and values for a set of traces.
Args:
*traces: A list of traces with samples. Can also contain non-Traces
which will be ignored.
Keywords Args:
start_time: The earliest (left-most) time bound for the traces.
stop_time: The latest (right-most) time bound for the traces.
step: Set the time increment for filling in between sample times.
If 0, then don't fill in between sample times.
Returns:
Table data and a list of headers for table columns.
"""
# Extract all the traces and ignore all the non-traces.
traces = [t for t in traces if isinstance(t, Trace)]
# Get sample times.
times = _get_sample_times(*traces, **kwargs)
# Create a table from lines of data where the first element in each row
# is the sample time and the following elements are the trace values.
table_data = list()
for time in times:
row = [trace.get_disp_value(time, **kwargs) for trace in traces]
row.insert(0, time)
table_data.append(row)
headers = ["Time"] + [trace.name for trace in traces]
return table_data, headers
def traces_to_table(*traces, **kwargs):
format = kwargs.get("format", "simple")
table_data, headers = traces_to_table_data(*traces, **kwargs)
return tabulate(tabular_data=table_data, headers=headers, tablefmt=format)
def traces_to_text_table(*traces, **kwargs):
if "format" not in kwargs:
kwargs["format"] = "simple"
print(traces_to_table(*traces, **kwargs))
def traces_to_html_table(*traces, **kwargs):
kwargs["format"] = "html"
tbl_html = traces_to_table(*traces, **kwargs)
# Generate the HTML from the JSON.
DISP.display_html(DISP.HTML(tbl_html))
def _interpolate_traces(*traces, times):
"""Interpolate trace values at times in the given list."""
for trace in traces:
trace.interpolate(times)
def traces_to_matplotlib(*traces, **kwargs):
"""
Display waveforms stored in peekers in Jupyter notebook using matplotlib.
Args:
*traces: A list of traces to convert into matplotlib for display.
Can also contain None which will create a blank trace.
Keywords Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
title_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
caption: String containing the title placed across the bottom of the display.
caption_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
grid_fmt (dict): https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.lines.Line2D.html#matplotlib.lines.Line2D
time_fmt (dict): https://matplotlib.org/3.2.1/api/text_api.html#matplotlib.text.Text
width: The width of the waveform display in inches.
height: The height of the waveform display in inches.
Returns:
Figure and axes created by matplotlib.pyplot.subplots.
"""
num_traces = len(traces)
trace_hgt = 0.5 # Default trace height in inches.
cycle_wid = 0.5 # Default unit cycle width in inches.
# Handle keyword args explicitly for Python 2 compatibility.
start_time = kwargs.pop(
"start_time",
min([trace.start_time() for trace in traces if isinstance(trace, Trace)]),
)
stop_time = kwargs.pop(
"stop_time",
max([trace.stop_time() for trace in traces if isinstance(trace, Trace)]),
)
title = kwargs.pop("title", "")
title_fmt = {"fontweight": "bold"}
title_fmt.update(kwargs.pop("title_fmt", {}))
caption = kwargs.pop("caption", "")
caption_fmt = {"fontstyle": "oblique"}
caption_fmt.update(kwargs.pop("caption_fmt", {}))
tick = kwargs.pop("tick", False)
tock = kwargs.pop("tock", False)
grid_fmt = {"color": "C1", "alpha": 1.0}
grid_fmt.update(kwargs.pop("grid_fmt", {}))
time_fmt = {}
time_fmt.update(kwargs.pop("time_fmt", {}))
width = kwargs.pop("width", (stop_time - start_time) / Trace.unit_time * cycle_wid)
height = kwargs.pop("height", num_traces * trace_hgt)
# Create separate plot traces for each selected waveform.
trace_hgt_pctg = 1.0 / num_traces
fig, axes = plt.subplots(
nrows=num_traces,
sharex=True,
squeeze=False,
subplot_kw=None,
gridspec_kw=None,
figsize=(width, height),
)
axes = axes[:, 0] # Collapse 2D matrix of subplots into a 1D list.
# Set the caption on the X-axis label on the bottom-most trace.
axes[-1].set_xlabel(caption, **caption_fmt)
# Set the title for the collection of traces on the top-most trace.
axes[0].set_title(title, **title_fmt)
# Set X-axis ticks at the bottom of the stack of traces.
start = math.floor(start_time / Trace.unit_time)
stop = math.ceil(stop_time / Trace.unit_time)
axes[-1].tick_params(axis="x", length=0, which="both") # No tick marks.
# Set positions of tick marks so grid lines will work.
axes[-1].set_xticks(
[x * Trace.unit_time for x in range(start, stop + 1)], minor=False
)
axes[-1].set_xticks(
[(x + 0.5) * Trace.unit_time for x in range(start, stop)], minor=True
)
# Place cycle times at tick marks or between them.
if not tick:
axes[-1].set_xticklabels([], minor=False, **time_fmt)
if tock:
axes[-1].set_xticklabels(
[str(x) for x in range(start, stop)], minor=True, **time_fmt
)
# Adjust the limits of the X axis so the grid doesn't get chopped-off and
# produce artifacts if a grid line is at the right or left edge.
bbox = axes[-1].get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width_in_pixels = bbox.width * fig.dpi
time_per_pixel = (stop_time - start_time) / width_in_pixels
xlim = (start_time - time_per_pixel, stop_time + time_per_pixel)
# Plot each trace waveform.
for i, (trace, axis) in enumerate(zip(traces, axes), 1):
# Set position of trace within stacked traces.
axis.set_position([0.1, (num_traces - i) * trace_hgt_pctg, 0.8, trace_hgt_pctg])
# Place grid on X axis.
axis.grid(axis="x", **grid_fmt)
if not trace:
# Leave a blank space for non-traces.
# Remove ticks from Y axis.
axis.set_yticks([])
axis.tick_params(axis="y", length=0, which="both")
# Remove the box around the subplot.
axis.spines["left"].set_visible(False)
axis.spines["right"].set_visible(False)
axis.spines["top"].set_visible(False)
axis.spines["bottom"].set_visible(False)
else:
trace.to_matplotlib(axis, start_time, stop_time, xlim, **kwargs)
# Return figure and axes for possible further processing.
return fig, axes
def wavejson_to_wavedrom(wavejson, width=None, skin="default"):
"""
Create WaveDrom display from WaveJSON data.
This code is from https://github.com/witchard/ipython-wavedrom.
Inputs:
width: Width of the display window in pixels. If left as None, the entire
waveform will be squashed into the width of the page. To prevent
this, set width to a large value. The display will then become scrollable.
skin: Selects the set of graphic elements used to draw the waveforms.
Allowable values are 'default' and 'narrow'.
"""
# Set the width of the waveform display.
style = ""
if width != None:
style = ' style="width: {w}px"'.format(w=str(int(width)))
# Generate the HTML from the JSON.
htmldata = '<div{style}><script type="WaveDrom">{json}</script></div>'.format(
style=style, json=json.dumps(wavejson)
)
DISP.display_html(DISP.HTML(htmldata))
# Trigger the WaveDrom Javascript that creates the graphical display.
DISP.display_javascript(
DISP.Javascript(
data="WaveDrom.ProcessAll();",
lib=[
"https://wavedrom.com/wavedrom.min.js",
"https://wavedrom.com/skins/{skin}.js".format(skin=skin),
],
)
)
# The following allows the display of WaveDROM in the HTML files generated by nbconvert.
# It's disabled because it makes Github's nbconvert freak out.
setup = """
<script src="https://wavedrom.com/skins/{skin}.js" type="text/javascript"></script>
<script src="https://wavedrom.com/wavedrom.min.js" type="text/javascript"></script>
<body onload="WaveDrom.ProcessAll()">
""".format(
skin=skin
)
# DISP.display_html(DISP.HTML(setup))
def traces_to_wavejson(*traces, **kwargs):
"""
Convert traces into a WaveJSON data structure.
Args:
*traces: A list of traces to convert into WaveJSON for display.
Can also contain None which will create a blank trace.
Keywords Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
caption: String containing the title placed across the bottom of the display.
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
Returns:
A dictionary with the JSON data for the waveforms.
"""
# Handle keyword args explicitly for Python 2 compatibility.
tock = kwargs.get("tock", False)
tick = kwargs.get("tick", False)
caption = kwargs.get("caption")
title = kwargs.get("title")
stop_time = kwargs.get(
"stop_time",
max([trace.stop_time() for trace in traces if isinstance(trace, Trace)]),
)
start_time = kwargs.get(
"start_time",
min([trace.start_time() for trace in traces if isinstance(trace, Trace)]),
)
wavejson = dict()
wavejson["signal"] = list()
for trace in traces:
if isinstance(trace, Trace):
wavejson["signal"].append(trace.to_wavejson(start_time, stop_time))
else:
# Insert an empty dictionary to create a blank line.
wavejson["signal"].append(dict())
# Integer start time for calculating tick/tock values.
int_start_time = round(start_time / Trace.unit_time)
# Create a header for the set of waveforms.
if title or tick or tock:
head = dict()
if title:
head["text"] = [
"tspan",
[
"tspan",
{"fill": "blue", "font-size": "16", "font-weight": "bold"},
title,
],
]
if tick:
head["tick"] = int_start_time
if tock:
head["tock"] = int_start_time
wavejson["head"] = head
# Create a footer for the set of waveforms.
if caption or tick or tock:
foot = dict()
if caption:
foot["text"] = ["tspan", ["tspan", {"font-style": "italic"}, caption]]
if tick:
foot["tick"] = int_start_time
if tock:
foot["tock"] = int_start_time
wavejson["foot"] = foot
return wavejson
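# For orientation, the dict returned above follows the WaveJSON layout, roughly
# (a hedged sketch; the exact contents depend on the traces passed in):
#
#     {"signal": [<per-trace dicts>, {}, ...],
#      "head": {"text": [...], "tick": <int>},
#      "foot": {"text": [...], "tock": <int>}}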
def traces_to_wavedrom(*traces, **kwargs):
"""
    Display waveforms stored in peekers in a Jupyter notebook using WaveDrom.
Args:
*traces: A list of traces to convert into WaveJSON for display.
    Keyword Args:
start_time: The earliest (left-most) time bound for the waveform display.
stop_time: The latest (right-most) time bound for the waveform display.
title: String containing the title placed across the top of the display.
        caption: String containing the caption placed across the bottom of the display.
tick: If true, times are shown at the tick marks of the display.
tock: If true, times are shown between the tick marks of the display.
width: The width of the waveform display in pixels.
Returns:
Nothing.
"""
wavejson_to_wavedrom(
traces_to_wavejson(*traces, **kwargs),
width=kwargs.get("width"),
skin=kwargs.get("skin", "default"),
)
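# Minimal usage sketch for a notebook cell (hedged: `clk` and `data` stand for
# Trace objects produced elsewhere; the names are illustrative only):
#
#     traces_to_wavedrom(clk, data, title="Bus read", tick=True, width=800)
#
# The call builds the WaveJSON via traces_to_wavejson() and hands it to
# wavejson_to_wavedrom() for rendering.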
|
|
#
# The Python Imaging Library.
# $Id$
#
##
# Image plugin for Palm pixmap images (output only).
##
__version__ = "1.0"
import Image, ImageFile
_Palm8BitColormapValues = (
( 255, 255, 255 ), ( 255, 204, 255 ), ( 255, 153, 255 ), ( 255, 102, 255 ),
( 255, 51, 255 ), ( 255, 0, 255 ), ( 255, 255, 204 ), ( 255, 204, 204 ),
( 255, 153, 204 ), ( 255, 102, 204 ), ( 255, 51, 204 ), ( 255, 0, 204 ),
( 255, 255, 153 ), ( 255, 204, 153 ), ( 255, 153, 153 ), ( 255, 102, 153 ),
( 255, 51, 153 ), ( 255, 0, 153 ), ( 204, 255, 255 ), ( 204, 204, 255 ),
( 204, 153, 255 ), ( 204, 102, 255 ), ( 204, 51, 255 ), ( 204, 0, 255 ),
( 204, 255, 204 ), ( 204, 204, 204 ), ( 204, 153, 204 ), ( 204, 102, 204 ),
( 204, 51, 204 ), ( 204, 0, 204 ), ( 204, 255, 153 ), ( 204, 204, 153 ),
( 204, 153, 153 ), ( 204, 102, 153 ), ( 204, 51, 153 ), ( 204, 0, 153 ),
( 153, 255, 255 ), ( 153, 204, 255 ), ( 153, 153, 255 ), ( 153, 102, 255 ),
( 153, 51, 255 ), ( 153, 0, 255 ), ( 153, 255, 204 ), ( 153, 204, 204 ),
( 153, 153, 204 ), ( 153, 102, 204 ), ( 153, 51, 204 ), ( 153, 0, 204 ),
( 153, 255, 153 ), ( 153, 204, 153 ), ( 153, 153, 153 ), ( 153, 102, 153 ),
( 153, 51, 153 ), ( 153, 0, 153 ), ( 102, 255, 255 ), ( 102, 204, 255 ),
( 102, 153, 255 ), ( 102, 102, 255 ), ( 102, 51, 255 ), ( 102, 0, 255 ),
( 102, 255, 204 ), ( 102, 204, 204 ), ( 102, 153, 204 ), ( 102, 102, 204 ),
( 102, 51, 204 ), ( 102, 0, 204 ), ( 102, 255, 153 ), ( 102, 204, 153 ),
( 102, 153, 153 ), ( 102, 102, 153 ), ( 102, 51, 153 ), ( 102, 0, 153 ),
( 51, 255, 255 ), ( 51, 204, 255 ), ( 51, 153, 255 ), ( 51, 102, 255 ),
( 51, 51, 255 ), ( 51, 0, 255 ), ( 51, 255, 204 ), ( 51, 204, 204 ),
( 51, 153, 204 ), ( 51, 102, 204 ), ( 51, 51, 204 ), ( 51, 0, 204 ),
( 51, 255, 153 ), ( 51, 204, 153 ), ( 51, 153, 153 ), ( 51, 102, 153 ),
( 51, 51, 153 ), ( 51, 0, 153 ), ( 0, 255, 255 ), ( 0, 204, 255 ),
( 0, 153, 255 ), ( 0, 102, 255 ), ( 0, 51, 255 ), ( 0, 0, 255 ),
( 0, 255, 204 ), ( 0, 204, 204 ), ( 0, 153, 204 ), ( 0, 102, 204 ),
( 0, 51, 204 ), ( 0, 0, 204 ), ( 0, 255, 153 ), ( 0, 204, 153 ),
( 0, 153, 153 ), ( 0, 102, 153 ), ( 0, 51, 153 ), ( 0, 0, 153 ),
( 255, 255, 102 ), ( 255, 204, 102 ), ( 255, 153, 102 ), ( 255, 102, 102 ),
( 255, 51, 102 ), ( 255, 0, 102 ), ( 255, 255, 51 ), ( 255, 204, 51 ),
( 255, 153, 51 ), ( 255, 102, 51 ), ( 255, 51, 51 ), ( 255, 0, 51 ),
( 255, 255, 0 ), ( 255, 204, 0 ), ( 255, 153, 0 ), ( 255, 102, 0 ),
( 255, 51, 0 ), ( 255, 0, 0 ), ( 204, 255, 102 ), ( 204, 204, 102 ),
( 204, 153, 102 ), ( 204, 102, 102 ), ( 204, 51, 102 ), ( 204, 0, 102 ),
( 204, 255, 51 ), ( 204, 204, 51 ), ( 204, 153, 51 ), ( 204, 102, 51 ),
( 204, 51, 51 ), ( 204, 0, 51 ), ( 204, 255, 0 ), ( 204, 204, 0 ),
( 204, 153, 0 ), ( 204, 102, 0 ), ( 204, 51, 0 ), ( 204, 0, 0 ),
( 153, 255, 102 ), ( 153, 204, 102 ), ( 153, 153, 102 ), ( 153, 102, 102 ),
( 153, 51, 102 ), ( 153, 0, 102 ), ( 153, 255, 51 ), ( 153, 204, 51 ),
( 153, 153, 51 ), ( 153, 102, 51 ), ( 153, 51, 51 ), ( 153, 0, 51 ),
( 153, 255, 0 ), ( 153, 204, 0 ), ( 153, 153, 0 ), ( 153, 102, 0 ),
( 153, 51, 0 ), ( 153, 0, 0 ), ( 102, 255, 102 ), ( 102, 204, 102 ),
( 102, 153, 102 ), ( 102, 102, 102 ), ( 102, 51, 102 ), ( 102, 0, 102 ),
( 102, 255, 51 ), ( 102, 204, 51 ), ( 102, 153, 51 ), ( 102, 102, 51 ),
( 102, 51, 51 ), ( 102, 0, 51 ), ( 102, 255, 0 ), ( 102, 204, 0 ),
( 102, 153, 0 ), ( 102, 102, 0 ), ( 102, 51, 0 ), ( 102, 0, 0 ),
( 51, 255, 102 ), ( 51, 204, 102 ), ( 51, 153, 102 ), ( 51, 102, 102 ),
( 51, 51, 102 ), ( 51, 0, 102 ), ( 51, 255, 51 ), ( 51, 204, 51 ),
( 51, 153, 51 ), ( 51, 102, 51 ), ( 51, 51, 51 ), ( 51, 0, 51 ),
( 51, 255, 0 ), ( 51, 204, 0 ), ( 51, 153, 0 ), ( 51, 102, 0 ),
( 51, 51, 0 ), ( 51, 0, 0 ), ( 0, 255, 102 ), ( 0, 204, 102 ),
( 0, 153, 102 ), ( 0, 102, 102 ), ( 0, 51, 102 ), ( 0, 0, 102 ),
( 0, 255, 51 ), ( 0, 204, 51 ), ( 0, 153, 51 ), ( 0, 102, 51 ),
( 0, 51, 51 ), ( 0, 0, 51 ), ( 0, 255, 0 ), ( 0, 204, 0 ),
( 0, 153, 0 ), ( 0, 102, 0 ), ( 0, 51, 0 ), ( 17, 17, 17 ),
( 34, 34, 34 ), ( 68, 68, 68 ), ( 85, 85, 85 ), ( 119, 119, 119 ),
( 136, 136, 136 ), ( 170, 170, 170 ), ( 187, 187, 187 ), ( 221, 221, 221 ),
( 238, 238, 238 ), ( 192, 192, 192 ), ( 128, 0, 0 ), ( 128, 0, 128 ),
( 0, 128, 0 ), ( 0, 128, 128 ), ( 0, 0, 0 ), ( 0, 0, 0 ),
( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ),
( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ),
( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ),
( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ),
( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ),
( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ), ( 0, 0, 0 ))
# Build a prototype image to be used for palette resampling.
def build_prototype_image():
    image = Image.new("L", (1, len(_Palm8BitColormapValues)))
    image.putdata(range(len(_Palm8BitColormapValues)))
    palettedata = ()
    for colormapValue in _Palm8BitColormapValues:
        palettedata += colormapValue
    palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues))
    image.putpalette(palettedata)
    return image
Palm8BitColormapImage = build_prototype_image()
# OK, we now have in Palm8BitColormapImage, a "P"-mode image with the right palette
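# Hedged example of how the prototype is typically used: remap an RGB image
# onto the Palm colormap before saving (the `palette` keyword of
# Image.quantize() is assumed to be available in this PIL version; the names
# are illustrative):
#
#     palm_ready = rgb_image.quantize(palette=Palm8BitColormapImage)
#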
#
# --------------------------------------------------------------------
_FLAGS = {
"custom-colormap": 0x4000,
"is-compressed": 0x8000,
"has-transparent": 0x2000,
}
_COMPRESSION_TYPES = {
"none": 0xFF,
"rle": 0x01,
"scanline": 0x00,
}
def o16b(i):
    # Pack an integer into two bytes, big-endian.
    return chr(i >> 8 & 255) + chr(i & 255)
#
# --------------------------------------------------------------------
##
# (Internal) Image save plugin for the Palm format.
def _save(im, fp, filename, check=0):
if im.mode == "P":
# we assume this is a color Palm image with the standard colormap,
# unless the "info" dict has a "custom-colormap" field
rawmode = "P"
bpp = 8
version = 1
elif im.mode == "L" and im.encoderinfo.has_key("bpp") and im.encoderinfo["bpp"] in (1, 2, 4):
# this is 8-bit grayscale, so we shift it to get the high-order bits, and invert it because
# Palm does greyscale from white (0) to black (1)
bpp = im.encoderinfo["bpp"]
im = im.point(lambda x, shift=8-bpp, maxval=(1 << bpp)-1: maxval - (x >> shift))
# we ignore the palette here
im.mode = "P"
rawmode = "P;" + str(bpp)
version = 1
elif im.mode == "L" and im.info.has_key("bpp") and im.info["bpp"] in (1, 2, 4):
# here we assume that even though the inherent mode is 8-bit grayscale, only
# the lower bpp bits are significant. We invert them to match the Palm.
bpp = im.info["bpp"]
im = im.point(lambda x, maxval=(1 << bpp)-1: maxval - (x & maxval))
# we ignore the palette here
im.mode = "P"
rawmode = "P;" + str(bpp)
version = 1
elif im.mode == "1":
# monochrome -- write it inverted, as is the Palm standard
rawmode = "1;I"
bpp = 1
version = 0
else:
        raise IOError("cannot write mode %s as Palm" % im.mode)
if check:
return check
#
# make sure image data is available
im.load()
# write header
cols = im.size[0]
rows = im.size[1]
    rowbytes = ((cols + (16 // bpp - 1)) // (16 // bpp)) * 2
    transparent_index = 0
    compression_type = _COMPRESSION_TYPES["none"]
    flags = 0
    if im.mode == "P" and "custom-colormap" in im.info:
        flags = flags | _FLAGS["custom-colormap"]  # set the custom-colormap flag bit
        colormapsize = 4 * 256 + 2
        colormapmode = im.palette.mode
        colormap = im.getdata().getpalette()
    else:
        colormapsize = 0
    if "offset" in im.info:
        offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4
    else:
        offset = 0
fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags))
fp.write(chr(bpp))
fp.write(chr(version))
fp.write(o16b(offset))
fp.write(chr(transparent_index))
fp.write(chr(compression_type))
fp.write(o16b(0)) # reserved by Palm
# now write colormap if necessary
if colormapsize > 0:
fp.write(o16b(256))
for i in range(256):
fp.write(chr(i))
if colormapmode == 'RGB':
fp.write(chr(colormap[3 * i]) + chr(colormap[3 * i + 1]) + chr(colormap[3 * i + 2]))
elif colormapmode == 'RGBA':
fp.write(chr(colormap[4 * i]) + chr(colormap[4 * i + 1]) + chr(colormap[4 * i + 2]))
# now convert data to raw form
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 0, (rawmode, rowbytes, 1))])
fp.flush()
#
# --------------------------------------------------------------------
Image.register_save("Palm", _save)
Image.register_extension("Palm", ".palm")
Image.register_mime("Palm", "image/palm")
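# Hedged usage sketch: once the plugin is registered, images go through the
# normal PIL save path. Keyword options passed to save() land in
# im.encoderinfo, so the "bpp" branch of _save() above can pick them up
# (file names below are illustrative):
#
#     im.convert("L").save("out.palm", "Palm", bpp=4)   # 4-bit greyscale
#     palm_ready.save("color.palm", "Palm")             # 8-bit colormapped
#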
|
|
# flake8: noqa I201
from .Child import Child
from .Node import Node
DECL_NODES = [
# type-assignment -> '=' type
Node('TypeInitializerClause', kind='Syntax',
children=[
Child('Equal', kind='EqualToken'),
Child('Value', kind='Type'),
]),
# typealias-declaration -> attributes? access-level-modifier? 'typealias'
# typealias-name generic-parameter-clause?
# type-assignment
# typealias-name -> identifier
Node('TypealiasDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('TypealiasKeyword', kind='TypealiasToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Initializer', kind='TypeInitializerClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
]),
# associatedtype-declaration -> attributes? access-level-modifier?
# 'associatedtype' associatedtype-name
# inheritance-clause? type-assignment?
# generic-where-clause?
# associatedtype-name -> identifier
Node('AssociatedtypeDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('AssociatedtypeKeyword', kind='AssociatedtypeToken'),
Child('Identifier', kind='IdentifierToken'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('Initializer', kind='TypeInitializerClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
]),
Node('FunctionParameterList', kind='SyntaxCollection',
element='FunctionParameter'),
Node('ParameterClause', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ParameterList', kind='FunctionParameterList',
collection_element_name='Parameter'),
Child('RightParen', kind='RightParenToken'),
]),
# -> Type
Node('ReturnClause', kind='Syntax',
children=[
Child('Arrow', kind='ArrowToken'),
Child('ReturnType', kind='Type'),
]),
# function-signature ->
# '(' parameter-list? ')' async? (throws | rethrows)? '->'? type?
Node('FunctionSignature', kind='Syntax',
children=[
Child('Input', kind='ParameterClause'),
Child('AsyncKeyword', kind='IdentifierToken',
classification='Keyword',
text_choices=['async'], is_optional=True),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
]),
Child('Output', kind='ReturnClause', is_optional=True),
]),
# if-config-clause ->
# ('#if' | '#elseif' | '#else') expr? (stmt-list | switch-case-list)
Node('IfConfigClause', kind='Syntax',
children=[
Child('PoundKeyword', kind='Token',
classification='BuildConfigId',
token_choices=[
'PoundIfToken',
'PoundElseifToken',
'PoundElseToken',
]),
Child('Condition', kind='Expr', classification='BuildConfigId',
is_optional=True),
Child('Elements', kind='Syntax',
node_choices=[
Child('Statements', kind='CodeBlockItemList'),
Child('SwitchCases', kind='SwitchCaseList'),
Child('Decls', kind='MemberDeclList'),
]),
]),
Node('IfConfigClauseList', kind='SyntaxCollection',
element='IfConfigClause'),
# if-config-decl -> '#if' expr stmt-list else-if-directive-clause-list
# else-clause? '#endif'
Node('IfConfigDecl', kind='Decl',
children=[
Child('Clauses', kind='IfConfigClauseList',
collection_element_name='Clause'),
Child('PoundEndif', kind='PoundEndifToken',
classification='BuildConfigId'),
]),
Node('PoundErrorDecl', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundError', kind='PoundErrorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Message', kind='StringLiteralExpr'),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundWarningDecl', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundWarning', kind='PoundWarningToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Message', kind='StringLiteralExpr'),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundSourceLocation', kind='Decl',
traits=['Parenthesized'],
children=[
Child('PoundSourceLocation', kind='PoundSourceLocationToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Args', kind='PoundSourceLocationArgs', is_optional=True),
Child('RightParen', kind='RightParenToken')
]),
Node('PoundSourceLocationArgs', kind='Syntax',
children=[
Child('FileArgLabel', kind='IdentifierToken',
text_choices=['file']),
Child('FileArgColon', kind='ColonToken'),
Child('FileName', kind='StringLiteralToken'),
Child('Comma', kind='CommaToken'),
Child('LineArgLabel', kind='IdentifierToken',
text_choices=['line']),
Child('LineArgColon', kind='ColonToken'),
Child('LineNumber', kind='IntegerLiteralToken'),
]),
Node('DeclModifier', kind='Syntax',
children=[
Child('Name', kind='Token', classification='Attribute',
text_choices=[
'class', 'convenience', 'dynamic', 'final', 'infix',
'lazy', 'optional', 'override', 'postfix', 'prefix',
'required', 'static', 'unowned', 'weak', 'private',
'fileprivate', 'internal', 'public', 'open',
'mutating', 'nonmutating', 'indirect', '__consuming',
'actor'
]),
Child('DetailLeftParen', kind='LeftParenToken', is_optional=True),
Child('Detail', kind='IdentifierToken', is_optional=True),
Child('DetailRightParen', kind='RightParenToken', is_optional=True),
]),
Node('InheritedType', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('TypeName', kind='Type'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('InheritedTypeList', kind='SyntaxCollection',
element='InheritedType'),
# type-inheritance-clause -> ':' type
Node('TypeInheritanceClause', kind='Syntax',
children=[
Child('Colon', kind='ColonToken'),
Child('InheritedTypeCollection', kind='InheritedTypeList',
collection_element_name='InheritedType'),
]),
# class-declaration -> attributes? access-level-modifier?
# 'class' class-name
# generic-parameter-clause?
# type-inheritance-clause?
# generic-where-clause?
# '{' class-members '}'
# class-name -> identifier
Node('ClassDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('ClassKeyword', kind='ClassToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
# struct-declaration -> attributes? access-level-modifier?
# 'struct' struct-name
# generic-parameter-clause?
# type-inheritance-clause?
# generic-where-clause?
# '{' struct-members '}'
# struct-name -> identifier
Node('StructDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('StructKeyword', kind='StructToken'),
Child('Identifier', kind='IdentifierToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
Node('ProtocolDecl', kind='Decl',
traits=['DeclGroup', 'IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('ProtocolKeyword', kind='ProtocolToken'),
Child('Identifier', kind='IdentifierToken'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
# extension-declaration -> attributes? access-level-modifier?
# 'extension' extended-type
# type-inheritance-clause?
# generic-where-clause?
# '{' extension-members '}'
# extension-name -> identifier
Node('ExtensionDecl', kind='Decl', traits=['DeclGroup'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('ExtensionKeyword', kind='ExtensionToken'),
Child('ExtendedType', kind='Type'),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
Child('Members', kind='MemberDeclBlock'),
]),
Node('MemberDeclBlock', kind='Syntax', traits=['Braced'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Members', kind='MemberDeclList',
collection_element_name='Member'),
Child('RightBrace', kind='RightBraceToken'),
]),
# member-decl-list = member-decl member-decl-list?
Node('MemberDeclList', kind='SyntaxCollection',
element='MemberDeclListItem'),
# member-decl = decl ';'?
Node('MemberDeclListItem', kind='Syntax', omit_when_empty=True,
description='''
A member declaration of a type consisting of a declaration and an
         optional semicolon.
''',
children=[
Child('Decl', kind='Decl',
description='The declaration of the type member.'),
Child('Semicolon', kind='SemicolonToken', is_optional=True,
description='An optional trailing semicolon.'),
]),
# source-file = code-block-item-list eof
Node('SourceFile', kind='Syntax',
traits=['WithStatements'],
children=[
Child('Statements', kind='CodeBlockItemList',
collection_element_name='Statement'),
Child('EOFToken', kind='EOFToken')
]),
# initializer -> '=' expr
Node('InitializerClause', kind='Syntax',
children=[
Child('Equal', kind='EqualToken'),
Child('Value', kind='Expr'),
]),
# parameter ->
# external-parameter-name? local-parameter-name ':'
# type '...'? '='? expression? ','?
Node('FunctionParameter', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('FirstName', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
],
is_optional=True),
               # One of these two names needs to be optional; we choose the
               # second name to avoid backtracking.
Child('SecondName', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Type', kind='Type',
is_optional=True),
Child('Ellipsis', kind='EllipsisToken',
is_optional=True),
Child('DefaultArgument', kind='InitializerClause',
is_optional=True),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# declaration-modifier -> access-level-modifier
# | mutation-modifier
# | 'class'
# | 'convenience'
# | 'dynamic'
# | 'final'
# | 'infix'
# | 'lazy'
# | 'optional'
# | 'override'
# | 'postfix'
# | 'prefix'
# | 'required'
# | 'static'
# | 'unowned'
# | 'unowned(safe)'
# | 'unowned(unsafe)'
# | 'weak'
# mutation-modifier -> 'mutating' | 'nonmutating'
Node('ModifierList', kind='SyntaxCollection',
omit_when_empty=True,
element='DeclModifier',
element_name='Modifier'),
Node('FunctionDecl', kind='Decl', traits=['IdentifiedDecl'],
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('FuncKeyword', kind='FuncToken'),
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'UnspacedBinaryOperatorToken',
'SpacedBinaryOperatorToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
]),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Signature', kind='FunctionSignature'),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('InitializerDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('InitKeyword', kind='InitToken'),
Child('OptionalMark', kind='Token',
token_choices=[
'PostfixQuestionMarkToken',
'InfixQuestionMarkToken',
'ExclamationMarkToken',
],
is_optional=True),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Parameters', kind='ParameterClause'),
Child('ThrowsOrRethrowsKeyword', kind='Token',
is_optional=True,
token_choices=[
'ThrowsToken',
'RethrowsToken',
]),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('DeinitializerDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('DeinitKeyword', kind='DeinitToken'),
Child('Body', kind='CodeBlock'),
]),
Node('SubscriptDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('SubscriptKeyword', kind='SubscriptToken'),
Child('GenericParameterClause', kind='GenericParameterClause',
is_optional=True),
Child('Indices', kind='ParameterClause'),
Child('Result', kind='ReturnClause'),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True),
# the body is not necessary inside a protocol definition
Child('Accessor', kind='Syntax', is_optional=True,
node_choices=[
Child('Accessors', kind='AccessorBlock'),
Child('Getter', kind='CodeBlock')]),
]),
# access-level-modifier -> 'private' | 'private' '(' 'set' ')'
# | 'fileprivate' | 'fileprivate' '(' 'set' ')'
# | 'internal' | 'internal' '(' 'set' ')'
# | 'public' | 'public' '(' 'set' ')'
# | 'open' | 'open' '(' 'set' ')'
Node('AccessLevelModifier', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('Modifier', kind='IdentifierToken',
is_optional=True),
Child('RightParen', kind='RightParenToken',
is_optional=True),
]),
# FIXME: technically misnamed; should be "ImportPathComponent"
Node('AccessPathComponent', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('TrailingDot', kind='PeriodToken', is_optional=True),
]),
# FIXME: technically misnamed; should be "ImportPath"
Node('AccessPath', kind='SyntaxCollection', element='AccessPathComponent'),
Node('ImportDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('ImportTok', kind='ImportToken'),
Child('ImportKind', kind='Token', is_optional=True,
token_choices=[
'TypealiasToken', 'StructToken', 'ClassToken',
'EnumToken', 'ProtocolToken', 'VarToken', 'LetToken',
'FuncToken',
]),
Child('Path', kind='AccessPath',
collection_element_name='PathComponent'),
]),
# (value)
Node('AccessorParameter', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='IdentifierToken'),
Child('RightParen', kind='RightParenToken'),
]),
Node('AccessorDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifier', kind='DeclModifier', is_optional=True),
Child('AccessorKind', kind='Token',
text_choices=[
'get', 'set', 'didSet', 'willSet', 'unsafeAddress',
'addressWithOwner', 'addressWithNativeOwner',
'unsafeMutableAddress',
'mutableAddressWithOwner',
'mutableAddressWithNativeOwner',
'_read', '_modify'
]),
Child('Parameter', kind='AccessorParameter', is_optional=True),
Child('Body', kind='CodeBlock', is_optional=True),
]),
Node('AccessorList', kind="SyntaxCollection", element='AccessorDecl'),
Node('AccessorBlock', kind="Syntax", traits=['Braced'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Accessors', kind='AccessorList',
collection_element_name='Accessor'),
Child('RightBrace', kind='RightBraceToken'),
]),
# Pattern: Type = Value { get {} },
Node('PatternBinding', kind="Syntax",
traits=['WithTrailingComma'],
children=[
Child('Pattern', kind='Pattern'),
Child('TypeAnnotation', kind='TypeAnnotation', is_optional=True),
Child('Initializer', kind='InitializerClause', is_optional=True),
Child('Accessor', kind='Syntax', is_optional=True,
node_choices=[
Child('Accessors', kind='AccessorBlock'),
Child('Getter', kind='CodeBlock')]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('PatternBindingList', kind="SyntaxCollection",
element='PatternBinding'),
Node('VariableDecl', kind='Decl',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True),
Child('LetOrVarKeyword', kind='Token',
token_choices=[
'LetToken', 'VarToken',
]),
Child('Bindings', kind='PatternBindingList',
collection_element_name='Binding'),
]),
Node('EnumCaseElement', kind='Syntax',
description='''
An element of an enum case, containing the name of the case and,
optionally, either associated values or an assignment to a raw value.
''',
traits=['WithTrailingComma'],
children=[
Child('Identifier', kind='IdentifierToken',
description='The name of this case.'),
Child('AssociatedValue', kind='ParameterClause', is_optional=True,
description='The set of associated values of the case.'),
Child('RawValue', kind='InitializerClause', is_optional=True,
description='''
The raw value of this enum element, if present.
'''),
Child('TrailingComma', kind='CommaToken', is_optional=True,
description='''
The trailing comma of this element, if the case has
multiple elements.
'''),
]),
Node('EnumCaseElementList', kind='SyntaxCollection',
description='A collection of 0 or more `EnumCaseElement`s.',
element='EnumCaseElement'),
Node('EnumCaseDecl', kind='Decl',
description='''
A `case` declaration of a Swift `enum`. It can have 1 or more
`EnumCaseElement`s inside, each declaring a different case of the
enum.
''',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True,
description='''
The attributes applied to the case declaration.
'''),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True,
description='''
The declaration modifiers applied to the case declaration.
'''),
Child('CaseKeyword', kind='CaseToken',
description='The `case` keyword for this case.'),
Child('Elements', kind='EnumCaseElementList',
collection_element_name='Element',
description='The elements this case declares.')
]),
Node('EnumDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `enum` declaration.',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True,
description='''
The attributes applied to the enum declaration.
'''),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True,
description='''
The declaration modifiers applied to the enum declaration.
'''),
Child('EnumKeyword', kind='EnumToken',
description='''
The `enum` keyword for this declaration.
'''),
Child('Identifier', kind='IdentifierToken',
description='''
The name of this enum.
'''),
Child('GenericParameters', kind='GenericParameterClause',
is_optional=True,
description='''
The generic parameters, if any, for this enum.
'''),
Child('InheritanceClause', kind='TypeInheritanceClause',
is_optional=True,
description='''
The inheritance clause describing conformances or raw
values for this enum.
'''),
Child('GenericWhereClause', kind='GenericWhereClause',
is_optional=True,
description='''
The `where` clause that applies to the generic parameters of
this enum.
'''),
Child('Members', kind='MemberDeclBlock',
description='''
The cases and other members of this enum.
''')
]),
# operator-decl -> attribute? modifiers? 'operator' operator
Node('OperatorDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `operator` declaration.',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True,
description='''
The attributes applied to the 'operator' declaration.
'''),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True,
classification='Attribute',
description='''
The declaration modifiers applied to the 'operator'
declaration.
'''),
Child('OperatorKeyword', kind='OperatorToken'),
Child('Identifier', kind='Token',
token_choices=[
'UnspacedBinaryOperatorToken',
'SpacedBinaryOperatorToken',
'PrefixOperatorToken',
'PostfixOperatorToken',
]),
Child('OperatorPrecedenceAndTypes', kind='OperatorPrecedenceAndTypes',
description='''
Optionally specify a precedence group and designated types.
''',
is_optional=True),
]),
Node('IdentifierList', kind='SyntaxCollection',
element='IdentifierToken'),
# infix-operator-group -> ':' identifier ','? identifier?
Node('OperatorPrecedenceAndTypes', kind='Syntax',
description='''
         A clause to specify the precedence group in infix operator declarations, and designated types in any operator declaration.
''',
children=[
Child('Colon', kind='ColonToken'),
Child('PrecedenceGroupAndDesignatedTypes', kind='IdentifierList',
collection_element_name='PrecedenceGroupAndDesignatedType',
description='''
The precedence group and designated types for this operator
'''),
]),
# precedence-group-decl -> attributes? modifiers? 'precedencegroup'
# identifier '{' precedence-group-attribute-list
# '}'
Node('PrecedenceGroupDecl', kind='Decl', traits=['IdentifiedDecl'],
description='A Swift `precedencegroup` declaration.',
children=[
Child('Attributes', kind='AttributeList',
collection_element_name='Attribute', is_optional=True,
description='''
The attributes applied to the 'precedencegroup' declaration.
'''),
Child('Modifiers', kind='ModifierList',
collection_element_name='Modifier', is_optional=True,
description='''
The declaration modifiers applied to the 'precedencegroup'
declaration.
'''),
Child('PrecedencegroupKeyword', kind='PrecedencegroupToken'),
Child('Identifier', kind='IdentifierToken',
description='''
The name of this precedence group.
'''),
Child('LeftBrace', kind='LeftBraceToken'),
Child('GroupAttributes', kind='PrecedenceGroupAttributeList',
collection_element_name='GroupAttribute',
description='''
The characteristics of this precedence group.
'''),
Child('RightBrace', kind='RightBraceToken'),
]),
# precedence-group-attribute-list ->
# (precedence-group-relation | precedence-group-assignment |
# precedence-group-associativity )*
Node('PrecedenceGroupAttributeList', kind='SyntaxCollection',
element='Syntax', element_name='PrecedenceGroupAttribute',
element_choices=[
'PrecedenceGroupRelation',
'PrecedenceGroupAssignment',
'PrecedenceGroupAssociativity'
]),
# precedence-group-relation ->
# ('higherThan' | 'lowerThan') ':' precedence-group-name-list
Node('PrecedenceGroupRelation', kind='Syntax',
description='''
Specify the new precedence group's relation to existing precedence
groups.
''',
children=[
Child('HigherThanOrLowerThan', kind='IdentifierToken',
classification='Keyword',
text_choices=[
'higherThan', 'lowerThan',
],
description='''
The relation to specified other precedence groups.
'''),
Child('Colon', kind='ColonToken'),
Child('OtherNames', kind='PrecedenceGroupNameList',
collection_element_name='OtherName',
description='''
                   The names of the other precedence groups to which this
                   precedence group relates.
'''),
]),
# precedence-group-name-list ->
# identifier (',' identifier)*
Node('PrecedenceGroupNameList', kind='SyntaxCollection',
element='PrecedenceGroupNameElement'),
Node('PrecedenceGroupNameElement', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# precedence-group-assignment ->
# 'assignment' ':' ('true' | 'false')
Node('PrecedenceGroupAssignment', kind='Syntax',
description='''
Specifies the precedence of an operator when used in an operation
that includes optional chaining.
''',
children=[
Child('AssignmentKeyword', kind='IdentifierToken',
text_choices=['assignment']),
Child('Colon', kind='ColonToken'),
Child('Flag', kind='Token',
token_choices=[
'TrueToken',
'FalseToken',
],
description='''
When true, an operator in the corresponding precedence group
uses the same grouping rules during optional chaining as the
assignment operators from the standard library. Otherwise,
                   operators in the precedence group follow the same optional
chaining rules as operators that don't perform assignment.
'''),
]),
# precedence-group-associativity ->
# 'associativity' ':' ('left' | 'right' | 'none')
Node('PrecedenceGroupAssociativity', kind='Syntax',
description='''
Specifies how a sequence of operators with the same precedence level
are grouped together in the absence of grouping parentheses.
''',
children=[
Child('AssociativityKeyword', kind='IdentifierToken',
classification='Keyword', text_choices=['associativity']),
Child('Colon', kind='ColonToken'),
Child('Value', kind='IdentifierToken',
text_choices=['left', 'right', 'none'],
description='''
Operators that are `left`-associative group left-to-right.
Operators that are `right`-associative group right-to-left.
Operators that are specified with an associativity of `none`
                   don't associate at all.
'''),
]),
]
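# Illustrative sketch only, not appended to DECL_NODES: a hypothetical
# declaration node written with the same Node/Child keyword arguments used
# throughout this file ('ExampleDecl' and its children are invented names).
#
#     Node('ExampleDecl', kind='Decl', traits=['IdentifiedDecl'],
#          children=[
#              Child('ExampleKeyword', kind='IdentifierToken',
#                    text_choices=['example']),
#              Child('Identifier', kind='IdentifierToken'),
#              Child('Body', kind='CodeBlock', is_optional=True),
#          ])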
|
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
#-# Modified by D.C.-G. for translation purposes
from OpenGL import GL
import numpy
import os
from albow import TableView, TableColumn, Label, Button, Column, CheckBox, AttrRef, Row, ask, alert, input_text_buttons, TabPanel
from albow.table_view import TableRowView
from albow.translate import _
from config import config
from editortools.editortool import EditorTool
from editortools.tooloptions import ToolOptions
from glbackground import Panel
from glutils import DisplayList
from mceutils import loadPNGTexture, alertException, drawTerrainCuttingWire, drawCube
from operation import Operation
import pymclevel
from pymclevel.box import BoundingBox, FloatBox
from pymclevel import nbt
import logging
import version_utils
from nbtexplorer import loadFile, saveFile, NBTExplorerToolPanel
import pygame
log = logging.getLogger(__name__)
class PlayerRemoveOperation(Operation):
undoTag = None
def __init__(self, tool, player="Player (Single Player)"):
super(PlayerRemoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.player = player
self.level = self.tool.editor.level
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
if self.player == "Player (Single Player)":
answer = ask(_("Are you sure you want to delete the default player?"), ["Yes", "Cancel"])
if answer == "Cancel":
return
self.player = "Player"
if recordUndo:
self.undoTag = self.level.getPlayerTag(self.player)
self.level.players.remove(self.player)
if self.tool.panel:
if self.player != "Player":
#self.tool.panel.players.remove(version_utils.getPlayerNameFromUUID(self.player))
self.tool.panel.players.remove(version_utils.playercache.getPlayerFromUUID(self.player))
else:
self.tool.panel.players.remove("Player (Single Player)")
while self.tool.panel.table.index >= len(self.tool.panel.players):
self.tool.panel.table.index -= 1
if len(self.tool.panel.players) == 0:
self.tool.hidePanel()
self.tool.showPanel()
self.tool.markerList.invalidate()
self.tool.movingPlayer = None
pos = self.tool.revPlayerPos[self.editor.level.dimNo][self.player]
del self.tool.playerPos[self.editor.level.dimNo][pos]
if self.player != "Player":
del self.tool.playerTexture[self.player]
else:
del self.level.root_tag["Data"]["Player"]
del self.tool.revPlayerPos[self.editor.level.dimNo][self.player]
self.canUndo = True
def undo(self):
        if self.undoTag is not None:
if self.player != "Player":
self.level.playerTagCache[self.level.getPlayerPath(self.player)] = self.undoTag
else:
self.level.root_tag["Data"]["Player"] = self.undoTag
self.level.players.append(self.player)
if self.tool.panel:
if self.player != "Player":
self.tool.panel.players.append(version_utils.playercache.getPlayerFromUUID(self.player))
else:
self.tool.panel.players.append("Player (Single Player)")
if "[No players]" in self.tool.panel.players:
self.tool.panel.players.remove("[No players]")
self.tool.hidePanel()
self.tool.showPanel()
self.tool.markerList.invalidate()
def redo(self):
self.perform()
class PlayerAddOperation(Operation):
playerTag = None
def __init__(self, tool):
super(PlayerAddOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.level = self.tool.editor.level
self.canUndo = False
def perform(self, recordUndo=True):
initial = ""
allowed_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
while True:
self.player = input_text_buttons("Enter a Player Name: ", 160, initial=initial, allowed_chars=allowed_chars)
if self.player is None:
return
elif len(self.player) > 16:
alert("Name too long. Maximum name length is 16.")
initial = self.player
elif len(self.player) < 1:
alert("Name too short. Minimum name length is 1.")
initial = self.player
else:
break
try:
data = version_utils.playercache.getPlayerInfo(self.player, force=True)
if isinstance(data, tuple):
self.uuid = data[0]
self.player = data[1]
else:
self.uuid = data
        except Exception:
action = ask("Could not get {}'s UUID. Please make sure that you are connected to the internet and that the player {} exists.".format(self.player, self.player), ["Enter UUID manually", "Cancel"])
if action != "Enter UUID manually":
return
self.uuid = input_text_buttons("Enter a Player UUID: ", 160)
if not self.uuid:
return
self.player = version_utils.playercache.getPlayerFromUUID(self.uuid)
if self.player == self.uuid.replace("-", ""):
if ask("UUID was not found. Continue anyways?") == "Cancel":
return
if self.uuid in self.level.players:
alert("Player already exists in this World.")
return
self.playerTag = self.newPlayer()
if self.tool.panel:
self.tool.panel.players.append(self.player)
if self.level.oldPlayerFolderFormat:
self.level.playerTagCache[self.level.getPlayerPath(self.player)] = self.playerTag
self.level.players.append(self.player)
if self.tool.panel:
self.tool.panel.player_UUID[self.player] = self.player
else:
self.level.playerTagCache[self.level.getPlayerPath(self.uuid)] = self.playerTag
self.level.players.append(self.uuid)
if self.tool.panel:
self.tool.panel.player_UUID[self.player] = self.uuid
self.tool.playerPos[self.editor.level.dimNo][(0,0,0)] = self.uuid
self.tool.revPlayerPos[self.editor.level.dimNo][self.uuid] = (0,0,0)
self.tool.playerTexture[self.uuid] = loadPNGTexture(version_utils.getPlayerSkin(self.uuid, force=False))
self.tool.markerList.invalidate()
self.tool.recordMove = False
self.tool.movingPlayer = self.uuid
if self.tool.panel:
self.tool.hidePanel()
self.tool.showPanel()
self.canUndo = True
self.playerTag.save(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.append(self.level.getPlayerPath(self.uuid))
self.tool.inOtherDimension[self.editor.level.dimNo].append(self.uuid)
def newPlayer(self):
playerTag = nbt.TAG_Compound()
playerTag['Air'] = nbt.TAG_Short(300)
playerTag['AttackTime'] = nbt.TAG_Short(0)
playerTag['DeathTime'] = nbt.TAG_Short(0)
playerTag['Fire'] = nbt.TAG_Short(-20)
playerTag['Health'] = nbt.TAG_Short(20)
playerTag['HurtTime'] = nbt.TAG_Short(0)
playerTag['Score'] = nbt.TAG_Int(0)
playerTag['FallDistance'] = nbt.TAG_Float(0)
playerTag['OnGround'] = nbt.TAG_Byte(0)
playerTag['Dimension'] = nbt.TAG_Int(self.editor.level.dimNo)
playerTag["Inventory"] = nbt.TAG_List()
playerTag['Motion'] = nbt.TAG_List([nbt.TAG_Double(0) for i in range(3)])
spawn = self.level.playerSpawnPosition()
spawnX = spawn[0]
spawnZ = spawn[2]
blocks = [self.level.blockAt(spawnX, i, spawnZ) for i in range(self.level.Height)]
        # Scan the spawn column from the top down and place the player one
        # block above the highest non-air block.
        i = self.level.Height
        for index, b in enumerate(reversed(blocks)):
            if b != 0:
                i = index
                break
        spawnY = self.level.Height - i
playerTag['Pos'] = nbt.TAG_List([nbt.TAG_Double([spawnX, spawnY, spawnZ][i]) for i in range(3)])
playerTag['Rotation'] = nbt.TAG_List([nbt.TAG_Float(0), nbt.TAG_Float(0)])
return playerTag
def undo(self):
self.level.players.remove(self.uuid)
self.tool.movingPlayer = None
if self.tool.panel:
self.tool.panel.players.remove(self.player)
self.tool.panel.player_UUID.pop(self.player)
        del self.tool.playerPos[self.editor.level.dimNo][(0, 0, 0)]
        del self.tool.revPlayerPos[self.editor.level.dimNo][self.uuid]
del self.tool.playerTexture[self.uuid]
os.remove(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.remove(self.level.getPlayerPath(self.uuid))
self.tool.markerList.invalidate()
def redo(self):
        if self.playerTag is not None:
self.level.playerTagCache[self.level.getPlayerPath(self.uuid)] = self.playerTag
self.level.players.append(self.uuid)
if self.tool.panel:
self.tool.panel.players.append(self.player)
self.tool.panel.player_UUID[self.player] = self.uuid
self.tool.playerTexture[self.uuid] = loadPNGTexture(version_utils.getPlayerSkin(self.uuid))
            self.tool.playerPos[self.editor.level.dimNo][(0, 0, 0)] = self.uuid
            self.tool.revPlayerPos[self.editor.level.dimNo][self.uuid] = (0, 0, 0)
self.playerTag.save(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.append(self.level.getPlayerPath(self.uuid))
self.tool.markerList.invalidate()
class PlayerMoveOperation(Operation):
undoPos = None
redoPos = None
def __init__(self, tool, pos, player="Player", yp=(None, None)):
super(PlayerMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.canUndo = False
self.pos = pos
self.player = player
self.yp = yp
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
try:
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
            except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
yaw, pitch = self.yp
if yaw is not None and pitch is not None:
level.setPlayerOrientation((yaw, pitch), self.player)
level.setPlayerPosition(self.pos, self.player)
level.setPlayerDimension(level.dimNo, self.player)
self.tool.markerList.invalidate()
self.canUndo = True
        except pymclevel.PlayerNotFound as e:
            print "Player move failed: ", e
def undo(self):
        if self.undoPos is not None:
level = self.tool.editor.level
try:
self.redoPos = level.getPlayerPosition(self.player)
self.redoDim = level.getPlayerDimension(self.player)
self.redoYP = level.getPlayerOrientation(self.player)
            except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
level.setPlayerPosition(self.undoPos, self.player)
level.setPlayerDimension(self.undoDim, self.player)
level.setPlayerOrientation(self.undoYP, self.player)
self.tool.markerList.invalidate()
def redo(self):
        if self.redoPos is not None:
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
            except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
level.setPlayerPosition(self.redoPos, self.player)
level.setPlayerDimension(self.redoDim, self.player)
level.setPlayerOrientation(self.redoYP, self.player)
self.tool.markerList.invalidate()
@staticmethod
def bufferSize():
return 20
class SpawnPositionInvalid(Exception):
pass
def okayAt63(level, pos):
"""blocks 63 or 64 must be occupied"""
# return level.blockAt(pos[0], 63, pos[2]) != 0 or level.blockAt(pos[0], 64, pos[2]) != 0
return True
def okayAboveSpawn(level, pos):
"""3 blocks above spawn must be open"""
return not any([level.blockAt(pos[0], pos[1] + i, pos[2]) for i in range(1, 4)])
def positionValid(level, pos):
try:
return okayAt63(level, pos) and okayAboveSpawn(level, pos)
except EnvironmentError:
return False
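# Hedged sketch (not called anywhere in this module): how the validity helpers
# above could guard a spawn move before constructing the operation defined
# below. `tool` stands for a PlayerPositionTool instance and `new_pos` for the
# candidate spawn position; both names are illustrative.
def _example_safe_spawn_move(tool, new_pos):
    # Only build the operation when the target column passes the checks above.
    if positionValid(tool.editor.level, new_pos):
        return PlayerSpawnMoveOperation(tool, new_pos)
    return None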
class PlayerSpawnMoveOperation(Operation):
undoPos = None
redoPos = None
def __init__(self, tool, pos):
super(PlayerSpawnMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool, self.pos = tool, pos
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
level = self.tool.editor.level
'''
if isinstance(level, pymclevel.MCInfdevOldLevel):
if not positionValid(level, self.pos):
if config.spawn.spawnProtection.get():
raise SpawnPositionInvalid(
"You cannot have two air blocks at Y=63 and Y=64 in your spawn point's column. Additionally, you cannot have a solid block in the three blocks above your spawn point. It's weird, I know.")
'''
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.pos)
self.tool.markerList.invalidate()
self.canUndo = True
def undo(self):
if self.undoPos is not None:
level = self.tool.editor.level
self.redoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.undoPos)
self.tool.markerList.invalidate()
def redo(self):
if self.redoPos is not None:
level = self.tool.editor.level
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.redoPos)
self.tool.markerList.invalidate()
class PlayerPositionPanel(Panel):
def __init__(self, tool):
Panel.__init__(self)
self.tool = tool
self.player_UUID = {}
self.level = tool.editor.level
if hasattr(self.level, 'players'):
players = self.level.players or ["[No players]"]
if not self.level.oldPlayerFolderFormat:
for player in players:
if player != "Player" and player != "[No players]":
if len(player) > 4 and player[4] == "-":
os.rename(os.path.join(self.level.worldFolder.getFolderPath("playerdata"), player+".dat"), os.path.join(self.level.worldFolder.getFolderPath("playerdata"), player.replace("-", "", 1)+".dat"))
player = player.replace("-", "", 1)
data = version_utils.playercache.getPlayerInfo(player)
if isinstance(data, tuple):
self.player_UUID[data[1]] = data[0]
else:
self.player_UUID[player] = data
if "Player" in players:
self.player_UUID["Player (Single Player)"] = "Player"
if "[No players]" not in players:
players = sorted(self.player_UUID.keys(), key=lambda x: False if x == "Player (Single Player)" else x)
else:
self.player_UUID = {"[No players]": "[No players]"}
else:
players = ["Player (Single Player)"]
self.players = players
self.pages = TabPanel()
tab_height = self.pages.tab_height
max_height = tab_height + self.tool.editor.mainViewport.height - self.tool.editor.toolbar.height - self.tool.editor.subwidgets[0].height - self.pages.margin * 2
#-# Uncomment the following line to have a maximum height for this panel.
# max_height = min(max_height, 500)
self.editNBTDataButton = Button("Edit NBT", action=self.editNBTData, tooltipText="Open the NBT Explorer to edit player's attributes and inventory")
addButton = Button("Add", action=self.tool.addPlayer)
removeButton = Button("Remove", action=self.tool.removePlayer)
gotoButton = Button("Goto", action=self.tool.gotoPlayer)
gotoCameraButton = Button("Goto View", action=self.tool.gotoPlayerCamera)
moveButton = Button("Move", action=self.tool.movePlayer)
moveToCameraButton = Button("Align to Camera", action=self.tool.movePlayerToCamera)
reloadSkin = Button("Reload Skins", action=self.tool.reloadSkins, tooltipText="This pulls skins from the online server, so this may take a while")
btns = [self.editNBTDataButton]
if not isinstance(self.level, pymclevel.leveldbpocket.PocketLeveldbWorld):
btns.extend([addButton, removeButton])
btns.extend([gotoButton, gotoCameraButton, moveButton, moveToCameraButton, reloadSkin])
btns = Column(btns, margin=0, spacing=2)
h = max_height - btns.height - self.pages.margin * 2 - 2 - self.font.get_linesize() * 2
col = Label('')
def close():
self.pages.show_page(col)
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject={}, height=max_height, \
close_text="Go Back", no_header=True, close_action=close,
load_text=None)
self.nbttree.shrink_wrap()
self.nbtpage = Column([self.nbttree])
self.nbtpage.shrink_wrap()
self.pages.add_page("NBT Data", self.nbtpage)
self.pages.set_rect(map(lambda x:x+self.margin, self.nbttree._rect))
tableview = TableView(nrows=(h - (self.font.get_linesize() * 2.5)) / self.font.get_linesize(),
header_height=self.font.get_linesize(),
columns=[TableColumn("Player Name(s):", (self.nbttree.width - (self.margin * 3)) / 3),
TableColumn("Player UUID(s):", (self.nbttree.width - (self.margin * 3)))],
)
tableview.index = 0
tableview.num_rows = lambda: len(players)
tableview.row_data = lambda i: (players[i],self.player_UUID[players[i]])
tableview.row_is_selected = lambda x: x == tableview.index
tableview.zebra_color = (0, 0, 0, 48)
def selectTableRow(i, evt):
tableview.index = i
tableview.click_row = selectTableRow
def mouse_down(e):
if e.button == 1 and e.num_clicks > 1:
self.editNBTData()
TableRowView.mouse_down(tableview.rows, e)
tableview.rows.mouse_down = mouse_down
tableview.rows.tooltipText = "Double-click or use the button below to edit the NBT Data."
self.table = tableview
col.set_parent(None)
self.col = col = Column([tableview, btns], spacing=2)
self.pages.add_page("Players", col, 0)
self.pages.shrink_wrap()
self.pages.show_page(col)
self.add(self.pages)
self.shrink_wrap()
self.max_height = max_height
def editNBTData(self):
player = self.selectedPlayer
if player == 'Player (Single Player)':
alert("Not yet implemented.\nUse the NBT Explorer to edit this player.")
elif player == '[No players]':
return
else:
player = self.level.getPlayerTag(self.selectedPlayer)
if player is not None:
self.pages.remove_page(self.nbtpage)
def close():
self.pages.show_page(self.col)
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject=player, fileName=None,
savePolicy=-1, dataKeyName=None,
height=self.max_height, no_header=True, close_text="Go Back",
close_action=close, load_text=None,
copy_data=False)
self.nbtpage = Column([self.nbttree,])
self.nbtpage.shrink_wrap()
self.pages.add_page("NBT Data", self.nbtpage)
self.pages.show_page(self.nbtpage)
else:
alert(_("Unable to load player %s" % self.selectedPlayer()))
@property
def selectedPlayer(self):
if not self.level.oldPlayerFolderFormat:
player = self.players[self.table.index]
if player != "Player (Single Player)" and player != "[No players]":
return self.player_UUID[player]
else:
return player
else:
return self.players[self.table.index]
def key_down(self, evt):
self.dispatch_key('key_down', evt)
def dispatch_key(self, name, evt):
if not hasattr(evt, 'key'):
return
if name == "key_down":
keyname = self.root.getKey(evt)
if self.pages.current_page == self.col:
if keyname == "Up" and self.table.index > 0:
self.table.index -= 1
self.table.rows.scroll_to_item(self.table.index)
elif keyname == "Down" and self.table.index < len(self.players) - 1:
self.table.index += 1
self.table.rows.scroll_to_item(self.table.index)
elif keyname == 'Page down':
self.table.index = min(len(self.players) - 1, self.table.index + self.table.rows.num_rows())
elif keyname == 'Page up':
self.table.index = max(0, self.table.index - self.table.rows.num_rows())
elif keyname == 'Return':
                    if self.selectedPlayer is not None:
self.editNBTData()
                last_visible = self.table.rows.cell_to_item_no(0, 0) + self.table.rows.num_rows() - 1
                if last_visible != self.table.index:
                    self.table.rows.scroll_to_item(self.table.index)
elif self.pages.current_page == self.nbtpage:
self.nbttree.dispatch_key(name, evt)
class PlayerPositionTool(EditorTool):
surfaceBuild = True
toolIconName = "player"
tooltipText = "Players"
movingPlayer = None
recordMove = True
def reloadTextures(self):
self.charTex = loadPNGTexture('char.png')
@alertException
def addPlayer(self):
op = PlayerAddOperation(self)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
@alertException
def removePlayer(self):
player = self.panel.selectedPlayer
if player != "[No players]":
op = PlayerRemoveOperation(self, player)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
@alertException
def movePlayer(self):
if self.panel.selectedPlayer != "[No players]":
self.movingPlayer = self.panel.selectedPlayer
if self.movingPlayer == "Player (Single Player)":
self.movingPlayer = "Player"
@alertException
def movePlayerToCamera(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
if player != "[No players]":
pos = self.editor.mainViewport.cameraPosition
y = self.editor.mainViewport.yaw
p = self.editor.mainViewport.pitch
op = PlayerMoveOperation(self, pos, player, (y, p))
self.movingPlayer = None
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
def delete_skin(self, uuid):
del self.playerTexture[uuid]
self.playerTexture[uuid] = loadPNGTexture('char.png')
@alertException
def reloadSkins(self):
#result = ask("This pulls skins from the online server, so this may take a while", ["Ok", "Cancel"])
#if result == "Ok":
try:
for player in self.editor.level.players:
if player != "Player" and player in self.playerTexture.keys():
del self.playerTexture[player]
self.playerTexture[player] = loadPNGTexture(version_utils.getPlayerSkin(player, force=True, instance=self))
        except Exception:
raise Exception("Could not connect to the skins server, please check your Internet connection and try again.")
def gotoPlayerCamera(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
try:
pos = self.editor.level.getPlayerPosition(player)
y, p = self.editor.level.getPlayerOrientation(player)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.yaw = y
self.editor.mainViewport.pitch = p
self.editor.mainViewport.stopMoving()
self.editor.mainViewport.invalidate()
except pymclevel.PlayerNotFound:
pass
def gotoPlayer(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
try:
if self.editor.mainViewport.pitch < 0:
self.editor.mainViewport.pitch = -self.editor.mainViewport.pitch
self.editor.mainViewport.cameraVector = self.editor.mainViewport._cameraVector()
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.getPlayerPosition(player)
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
except pymclevel.PlayerNotFound:
pass
def __init__(self, *args):
EditorTool.__init__(self, *args)
self.reloadTextures()
self.nonSavedPlayers = []
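# UV coordinates (in skin-texture pixels, classic 64x32 layout) for the six
# faces of the player's head, followed by the hat/overlay layer below; both
# arrays are reshaped to (24, 2) and rescaled further down before use.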
textureVerticesHead = numpy.array(
(
# Backside of Head
24, 16, # Bottom Left
24, 8, # Top Left
32, 8, # Top Right
32, 16, # Bottom Right
# Front of Head
8, 16,
8, 8,
16, 8,
16, 16,
#
24, 0,
16, 0,
16, 8,
24, 8,
#
16, 0,
8, 0,
8, 8,
16, 8,
#
8, 8,
0, 8,
0, 16,
8, 16,
16, 16,
24, 16,
24, 8,
16, 8,
), dtype='f4')
textureVerticesHat = numpy.array(
(
56, 16,
56, 8,
64, 8,
64, 16,
48, 16,
48, 8,
40, 8,
40, 16,
56, 0,
48, 0,
48, 8,
56, 8,
48, 0,
40, 0,
40, 8,
48, 8,
40, 8,
32, 8,
32, 16,
40, 16,
48, 16,
56, 16,
56, 8,
48, 8,
), dtype='f4')
textureVerticesHead.shape = (24, 2)
textureVerticesHat.shape = (24, 2)
textureVerticesHead *= 4
textureVerticesHead[:, 1] *= 2
textureVerticesHat *= 4
textureVerticesHat[:, 1] *= 2
self.texVerts = (textureVerticesHead, textureVerticesHat)
self.playerPos = {0:{}, -1:{}, 1:{}}
self.playerTexture = {}
self.revPlayerPos = {0:{}, -1:{}, 1:{}}
self.inOtherDimension = {0: [], 1: [], -1: []}
self.markerList = DisplayList()
panel = None
def showPanel(self):
if not self.panel:
self.panel = PlayerPositionPanel(self)
self.panel.centery = (self.editor.mainViewport.height - self.editor.toolbar.height) / 2 + self.editor.subwidgets[0].height
self.panel.left = self.editor.left
self.editor.add(self.panel)
def hidePanel(self):
if self.panel and self.panel.parent:
self.panel.parent.remove(self.panel)
self.panel = None
def drawToolReticle(self):
if self.movingPlayer is None:
return
pos, direction = self.editor.blockFaceUnderCursor
dim = self.editor.level.getPlayerDimension(self.movingPlayer)
pos = (pos[0], pos[1] + 2, pos[2])
x, y, z = pos
# x,y,z=map(lambda p,d: p+d, pos, direction)
GL.glEnable(GL.GL_BLEND)
GL.glColor(1.0, 1.0, 1.0, 0.5)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5, self.revPlayerPos[dim][self.movingPlayer], dim)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5, self.revPlayerPos[dim][self.movingPlayer], dim)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)))
drawTerrainCuttingWire(BoundingBox((x, y - 1, z), (1, 1, 1)))
#drawTerrainCuttingWire( BoundingBox((x,y-2,z), (1,1,1)) )
GL.glDisable(GL.GL_DEPTH_TEST)
markerLevel = None
def drawToolMarkers(self):
if not config.settings.drawPlayerHeads.get():
return
if self.markerLevel != self.editor.level:
self.markerList.invalidate()
self.markerLevel = self.editor.level
self.markerList.call(self._drawToolMarkers)
def _drawToolMarkers(self):
GL.glColor(1.0, 1.0, 1.0, 0.5)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glMatrixMode(GL.GL_MODELVIEW)
for player in self.editor.level.players:
try:
pos = self.editor.level.getPlayerPosition(player)
yaw, pitch = self.editor.level.getPlayerOrientation(player)
dim = self.editor.level.getPlayerDimension(player)
self.inOtherDimension[dim].append(player)
self.playerPos[dim][pos] = player
self.revPlayerPos[dim][player] = pos
if player != "Player" and config.settings.downloadPlayerSkins.get():
self.playerTexture[player] = loadPNGTexture(version_utils.getPlayerSkin(player, force=False))
else:
self.playerTexture[player] = self.charTex
if dim != self.editor.level.dimNo:
continue
x, y, z = pos
GL.glPushMatrix()
GL.glTranslate(x, y, z)
GL.glRotate(-yaw, 0, 1, 0)
GL.glRotate(pitch, 1, 0, 0)
GL.glColor(1, 1, 1, 1)
self.drawCharacterHead(0, 0, 0, (x,y,z), self.editor.level.dimNo)
GL.glPopMatrix()
# GL.glEnable(GL.GL_BLEND)
drawTerrainCuttingWire(FloatBox((x - .5, y - .5, z - .5), (1, 1, 1)),
c0=(0.3, 0.9, 0.7, 1.0),
c1=(0, 0, 0, 0),
)
#GL.glDisable(GL.GL_BLEND)
except Exception, e:
print repr(e)
continue
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCharacterHead(self, x, y, z, realCoords=None, dim=0):
GL.glEnable(GL.GL_CULL_FACE)
origin = (x - 0.25, y - 0.25, z - 0.25)
size = (0.5, 0.5, 0.5)
box = FloatBox(origin, size)
hat_origin = (x - 0.275, y - 0.275, z - 0.275)
hat_size = (0.55, 0.55, 0.55)
hat_box = FloatBox(hat_origin, hat_size)
if realCoords is not None and self.playerPos[dim][realCoords] != "Player" and config.settings.downloadPlayerSkins.get():
drawCube(box,
texture=self.playerTexture[self.playerPos[dim][realCoords]], textureVertices=self.texVerts[0])
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
drawCube(hat_box,
texture=self.playerTexture[self.playerPos[dim][realCoords]], textureVertices=self.texVerts[1])
GL.glDisable(GL.GL_BLEND)
else:
drawCube(box,
texture=self.charTex, textureVertices=self.texVerts[0])
GL.glDisable(GL.GL_CULL_FACE)
#@property
#def statusText(self):
# if not self.panel:
# return ""
# player = self.panel.selectedPlayer
# if player == "Player":
# return "Click to move the player"
#
# return _("Click to move the player \"{0}\"").format(player)
@alertException
def mouseDown(self, evt, pos, direction):
if self.movingPlayer is None:
return
pos = (pos[0] + 0.5, pos[1] + 2.75, pos[2] + 0.5)
op = PlayerMoveOperation(self, pos, self.movingPlayer)
self.movingPlayer = None
if self.recordMove:
self.editor.addOperation(op)
addingMoving = False
else:
self.editor.performWithRetry(op) #Prevent recording of Undo when adding player
self.recordMove = True
addingMoving = True
if op.canUndo and not addingMoving:
self.editor.addUnsavedEdit()
def keyDown(self, evt):
keyname = evt.dict.get('keyname', None) or self.editor.get_root().getKey(evt)
if not self.recordMove:
if not pygame.key.get_focused():
return
if keyname == "Escape":
self.recordMove = True
if self.panel and self.panel.__class__ == PlayerPositionPanel:
self.panel.key_down(evt)
def keyUp(self, evt):
pass
def levelChanged(self):
self.markerList.invalidate()
@alertException
def toolSelected(self):
self.showPanel()
self.movingPlayer = None
@alertException
def toolReselected(self):
if self.panel:
self.gotoPlayer()
class PlayerSpawnPositionOptions(ToolOptions):
def __init__(self, tool):
Panel.__init__(self)
self.tool = tool
self.spawnProtectionCheckBox = CheckBox(ref=AttrRef(tool, "spawnProtection"))
self.spawnProtectionLabel = Label("Spawn Position Safety")
self.spawnProtectionLabel.mouse_down = self.spawnProtectionCheckBox.mouse_down
tooltipText = "Minecraft will randomly move your spawn point if you try to respawn in a column where there are no blocks at Y=63 and Y=64. Only uncheck this box if Minecraft is changed."
self.spawnProtectionLabel.tooltipText = self.spawnProtectionCheckBox.tooltipText = tooltipText
row = Row((self.spawnProtectionCheckBox, self.spawnProtectionLabel))
col = Column((Label("Spawn Point Options"), row, Button("OK", action=self.dismiss)))
self.add(col)
self.shrink_wrap()
class PlayerSpawnPositionTool(PlayerPositionTool):
surfaceBuild = True
toolIconName = "playerspawn"
tooltipText = "Move Spawn Point\nRight-click for options"
def __init__(self, *args):
PlayerPositionTool.__init__(self, *args)
self.optionsPanel = PlayerSpawnPositionOptions(self)
def toolEnabled(self):
return self.editor.level.dimNo == 0
def showPanel(self):
self.panel = Panel()
button = Button("Goto Spawn", action=self.gotoSpawn)
self.panel.add(button)
self.panel.shrink_wrap()
self.panel.left = self.editor.left
self.panel.centery = self.editor.centery
self.editor.add(self.panel)
def gotoSpawn(self):
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.playerSpawnPosition()
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
@property
def statusText(self):
return "Click to set the spawn position."
spawnProtection = config.spawn.spawnProtection.property()
def drawToolReticle(self):
pos, direction = self.editor.blockFaceUnderCursor
x, y, z = map(lambda p, d: p + d, pos, direction)
color = (1.0, 1.0, 1.0, 0.5)
if isinstance(self.editor.level, pymclevel.MCInfdevOldLevel) and self.spawnProtection:
if not positionValid(self.editor.level, (x, y, z)):
color = (1.0, 0.0, 0.0, 0.5)
GL.glColor(*color)
GL.glEnable(GL.GL_BLEND)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
color2 = map(lambda a: a * 0.4, color)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)), color2, color)
GL.glDisable(GL.GL_DEPTH_TEST)
def _drawToolMarkers(self):
x, y, z = self.editor.level.playerSpawnPosition()
GL.glColor(1.0, 1.0, 1.0, 1.0)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5 + 0.125 * numpy.sin(self.editor.frames * 0.05), z + 0.5)
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCage(self, x, y, z):
cageTexVerts = numpy.array(pymclevel.MCInfdevOldLevel.materials.blockTextures[52, 0])
pixelScale = 0.5 if self.editor.level.materials.name in ("Pocket", "Alpha") else 1.0
texSize = 16 * pixelScale
cageTexVerts *= pixelScale
cageTexVerts = numpy.array(
[((tx, ty), (tx + texSize, ty), (tx + texSize, ty + texSize), (tx, ty + texSize)) for (tx, ty) in
cageTexVerts], dtype='float32')
GL.glEnable(GL.GL_ALPHA_TEST)
drawCube(BoundingBox((x, y, z), (1, 1, 1)), texture=pymclevel.alphaMaterials.terrainTexture,
textureVertices=cageTexVerts)
GL.glDisable(GL.GL_ALPHA_TEST)
@alertException
def mouseDown(self, evt, pos, direction):
pos = map(lambda p, d: p + d, pos, direction)
op = PlayerSpawnMoveOperation(self, pos)
try:
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
if "Okay" != ask(str(e), responses=["Okay", "Fix it for me!"]):
level = self.editor.level
status = ""
if not okayAt63(level, pos):
level.setBlockAt(pos[0], 63, pos[2], 1)
status += _("Block added at y=63.\n")
if 59 < pos[1] < 63:
pos[1] = 63
status += _("Spawn point moved upward to y=63.\n")
if not okayAboveSpawn(level, pos):
if pos[1] > 63 or pos[1] < 59:
lpos = (pos[0], pos[1] - 1, pos[2])
if level.blockAt(*pos) == 0 and level.blockAt(*lpos) != 0 and okayAboveSpawn(level, lpos):
pos = lpos
status += _("Spawn point shifted down by one block.\n")
if not okayAboveSpawn(level, pos):
for i in range(1, 4):
level.setBlockAt(pos[0], pos[1] + i, pos[2], 0)
status += _("Blocks above spawn point cleared.\n")
self.editor.invalidateChunks([(pos[0] // 16, pos[2] // 16)])
op = PlayerSpawnMoveOperation(self, pos)
try:
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
alert(str(e))
return
if len(status):
alert(_("Spawn point fixed. Changes: \n\n") + status)
@alertException
def toolReselected(self):
self.gotoSpawn()
|
|
from snappy import *
from random import randrange
from collections import OrderedDict
import multiprocessing
class NeumannZagierDatum():
"""
Compute the Neumann-Zagier datum of a triangulated manifold: the matrices
A and B, the vector nu, the integer vectors f and f'', the shape field with
an embedding, and the tetrahedron shapes (see generate_nz_data).
"""
def __init__(self, manifold, engine=None, verbose=False, file_name=None):
self.manifold = manifold
self.engine = engine
self.verbose = verbose
self.file_name = file_name
self._raw_gluing_equations = manifold.gluing_equations()
self.num_shapes = self._raw_gluing_equations.ncols() // 3
self.num_eqns = self._raw_gluing_equations.nrows()
self._eliminated_shapes = self.num_shapes * [1, ]
self.computed_ptolemy = False
self.nz = None
pari.set_real_precision(100)
def every_third_elem(L, shift):
return [L[i] for i in range(shift, len(L), 3)]
def epsilon(CC, d):
return 2**(-CC.precision()//d)
def all_shape_parameters(self, z):
return [z, 1 / (1 - z), 1 - 1 / z]
def in_threes(self, L):
return [L[3 * i : 3 * (i + 1)] for i in range(len(L) // 3)]
def shift_in_threes(self, L, shifts):
return sum([X[s:] + X[:s] for X, s in zip(self.in_threes(L),\
shifts)], [])
def is_geom(self, M, c):
vol = M.volume()
for v in c.volume_numerical():
if abs(v - vol) < 1e-10:
return True
return False
def gluing_equations(self):
eqns = self._raw_gluing_equations
new_cols = self.shift_in_threes(eqns.columns(), [(i - 1) % 3 for\
i in self._eliminated_shapes])
return matrix(new_cols).transpose()
def ABCbar(self):
eqns = self.gluing_equations()
n = self.num_shapes
return [eqns.matrix_from_columns(range(i, 3 * n, 3))\
for i in range(3)]
def target_vector(self):
"""
The vector v such that pi*I*v is the right-hand side of the
logarithmic gluing equations: 2 for each edge equation and 0 for
each cusp equation.
"""
m = self.num_eqns
c = self.manifold.num_cusps()
return vector(ZZ, [2 for i in range(m - 2 * c)] + (2 * c) * [0])
def ABv(self):
A, B, C = self.ABCbar()
one = vector(B.base_ring(), B.ncols() * [1])
return A - B, C - B, self.target_vector() - B * one
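# A, B and v are arranged so that A*log(z) + B*log(z'') = pi*I*v holds for
# the geometric solution; this identity is verified in check() below.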
def shapes(self, precision=None, shapes=None):
if shapes:
base_shapes = shapes
else:
if precision is None:
base_shapes = [CC(z) for z in\
self.manifold.tetrahedra_shapes(part='rect')]
else:
base_shapes = hypertorsion.snap.\
polished_tetrahedra_shapes(self.manifold,\
precision)
return [self.all_shape_parameters(z)[(i - 1) % 3]
for z, i in zip(base_shapes, self._eliminated_shapes)]
def ABv_square(self, rows_to_eliminate=None):
A, B, v = self.ABv()
M = block_matrix([[B, A, v.column()]])
c = self.manifold.num_cusps()
rows = range(M.nrows())[:-2 * c]
rows += [2 * i + rows[-1] + 1 for i in range(c)]
M = M.matrix_from_rows(rows)
M = M.hermite_form(include_zero_rows=False)
n = A.ncols()
return M.matrix_from_columns(range(n, 2 * n)),\
M.matrix_from_columns(range(n)), M.columns()[-1]
def f_and_fddot(self):
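# Solve A*f + B*f'' = v over the integers via the Smith normal form of the
# block matrix [A | B]; the assertion below checks the solution.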
A, B, v = self.ABv_square()
n = A.ncols()
M = block_matrix([[A, B]])
S, U, V = M.smith_form()
d = S.diagonal()
f = V * vector(ZZ, [x / y for x, y in zip(U * v, d)] + n * [0])
assert M * f == v
return vector(f[:n]), vector(f[n:])
def make_B_nondegenerate(self):
while det(self.ABv_square()[1]) == 0:
self._eliminated_shapes = [randrange(3) for\
i in range(self.num_shapes)]
def compute_ptolemy_field_and_embedding(self):
vol = self.manifold.volume()
p = self.manifold.ptolemy_variety(2, 'all')
if self.computed_ptolemy is False:
if self.engine == "retrieve":
try:
s = p.retrieve_solutions(verbose=self.verbose)\
.flatten(depth=2)
except Exception:
s = p.compute_solutions(engine=self.engine,\
verbose=self.verbose).flatten(depth=2)
else:
s = p.compute_solutions(engine=self.engine,\
verbose=self.verbose).flatten(depth=2)
self.computed_ptolemy = s
else:
s = self.computed_ptolemy
for sol in s:
rsol = zip(pari('polroots(%s)' %sol.number_field()),\
sol.numerical())
for root, numerical_sol in rsol:
if abs(vol - numerical_sol.volume_numerical()) < 1e-10:
return sol.number_field(), root
def check(self):
shapes = vector(self.all_log_shapes())
CC = shapes[0].parent()
eqns = self.gluing_equations()
pi_I = CC.pi() * CC.gen()
error = eqns * shapes - pi_I * self.target_vector()
assert error.norm(Infinity) < epsilon(CC, 2)
A, B, v = self.ABv()
z = vector(every_third_elem(shapes, 0))
z_ddot = vector(every_third_elem(shapes, 2))
error = A * z + B * z_ddot - pi_I * v
assert error.norm(Infinity) < epsilon(CC, 2)
def generate_nz_data(self):
self.make_B_nondegenerate()
temp_ABv = self.ABv_square()
A = temp_ABv[0]
B = temp_ABv[1]
nu = temp_ABv[2]
temp_fs = self.f_and_fddot()
f = temp_fs[0]
f_ddot = temp_fs[1]
new_shapes = self.shapes()
pol, embedding = self.compute_ptolemy_field_and_embedding()
new_nz = (A, B, nu, f, f_ddot, pol, new_shapes, embedding)
self.nz = new_nz
if self.file_name is not None:
save(new_nz, self.file_name)
class nloop():
"""
Compute the n-loop invariant S_n.
Reference: ``The Quantum Content of the Gluing Equations'' by
Dimofte and Garoufalidis.
"""
def __init__(self, nzdata, n, diagrams):
"""Initializes class variables."""
(A, B, nu, f, f_ddot, _, zees, _) = nzdata
self.A = A
self.B = B
self.nu = nu
self.f = f
self.f_ddot = f_ddot
self.zees = zees
self.CC = self.zees[0].parent()
self.prec = self.zees[0].prec()
self.n = n
self.diagrams = [g for g in diagrams if \
self.feynman_loop_number(g) <= self.n]
self.ver_factor = None
self.prev = OrderedDict()
def exponentiate_list(self, L, E):
return prod([l ** e for l, e in zip(L, E)])
def one_loop(self, precision=None, shapes=None):
CC = self.zees[0].parent()
shapes_dd = [1 - 1 / z for z in self.zees]
D1 = diagonal_matrix(shapes_dd)
D2 = diagonal_matrix([1 / z for z in self.zees])
return (1 / CC(2)) * det(self.A * D1 + self.B * D2) *\
self.exponentiate_list(self.zees, self.f_ddot) *\
self.exponentiate_list(shapes_dd, -self.f)
def pre_comp_polylog(self, index, z):
"""
Closed-form expressions for the polylogarithms Li_k(z), 1 >= k >= -19,
that appear repeatedly in the n-loop computation. Returning these
precomputed forms (cached in self.prev by polylogarithm) avoids a
costly PARI polylog call for every evaluation.
"""
if index == 1:
return -ln(1 - z)
if index == 0:
return z / (1 - z)
if index == -1:
return z / (z ** 2 - 2 * z + 1)
if index == -2:
return (-z ** 2 - z) / (z ** 3 - 3 * z ** 2 + 3 * z - 1)
if index == -3:
return (z ** 3 + 4 * z ** 2 + z) / (z ** 4 - 4 * z ** 3 +\
6 * z ** 2 - 4 * z + 1)
if index == -4:
return (-z ** 4 - 11 * z ** 3 - 11 * z ** 2 - z) / (z **\
5 - 5 * z ** 4 + 10 * z ** 3 - 10 * z ** 2 + 5 * z - 1)
if index == -5:
return (z ** 5 + 26 * z ** 4 + 66 * z ** 3 + 26 * z ** 2 +\
z) / (z ** 6 - 6 * z ** 5 + 15 * z ** 4 - 20 * z ** 3 +\
15 * z ** 2 - 6 * z + 1)
if index == -6:
return (-z ** 6 - 57 * z ** 5 - 302 * z ** 4 - 302 * z **\
3 - 57 * z ** 2 - z) / (z ** 7 - 7 * z ** 6 + 21 *\
z ** 5 - 35 * z ** 4 + 35 * z ** 3 -\
21 * z ** 2 + 7 * z - 1)
if index == -7:
return (z ** 7 + 120 * z ** 6 + 1191 * z ** 5 + 2416 *\
z ** 4 + 1191 * z ** 3 + 120 * z ** 2 + z) / (z **\
8 - 8 * z ** 7 + 28 * z ** 6 - 56 * z ** 5 + 70 *\
z ** 4 - 56 * z ** 3 + 28 * z ** 2 - 8 * z + 1)
if index == -8:
return (-z ** 8 - 247 * z ** 7 - 4293 * z ** 6 -\
15619 * z ** 5 - 15619 * z ** 4 - 4293 * z **\
3 - 247 * z ** 2 - z) / (z ** 9 - 9 * z ** 8 +\
36 * z ** 7 - 84 * z ** 6 + 126 * z ** 5 - 126 *\
z ** 4 + 84 * z ** 3 - 36 * z ** 2 + 9 * z - 1)
if index == -9:
return (z ** 9 + 502 * z ** 8 + 14608 * z ** 7 +\
88234 * z ** 6 + 156190 * z ** 5 + 88234 * z ** 4 +\
14608 * z ** 3 + 502 * z ** 2 + z) / (z ** 10 -\
10 * z ** 9 + 45 * z ** 8 - 120 * z ** 7 + 210 * z **\
6 - 252 * z ** 5 + 210 * z ** 4 - 120 * z ** 3 + 45 *\
z ** 2 - 10 * z + 1)
if index == -10:
return (-z ** 10 - 1013 * z ** 9 - 47840 * z ** 8 -\
455192 * z ** 7 - 1310354 * z ** 6 - 1310354 * z ** 5 -\
455192 * z ** 4 - 47840 * z ** 3 - 1013 * z ** 2 - z) /\
(z ** 11 - 11 * z ** 10 + 55 * z ** 9 - 165 * z ** 8 +\
330 * z ** 7 - 462 * z ** 6 + 462 * z ** 5 - 330 * z **\
4 + 165 * z ** 3 - 55 * z ** 2 + 11 * z - 1)
if index == -11:
return (z ** 11 + 2036 * z ** 10 + 152637 * z ** 9 +\
2203488 * z ** 8 + 9738114 * z**7 + 15724248 *\
z ** 6 + 9738114 * z ** 5 + 2203488 * z ** 4 +\
152637 * z ** 3 + 2036 * z ** 2 + z) / (z ** 12 - 12 *\
z ** 11 + 66 * z ** 10 - 220 * z ** 9 + 495 * z ** 8 -\
792 * z ** 7 + 924 * z ** 6 - 792 * z ** 5 + 495 * z **\
4 - 220 * z ** 3 + 66 * z ** 2 - 12 * z + 1)
if index == -12:
return (-z ** 12 - 4083 * z ** 11 - 478271 * z ** 10 -\
10187685 * z ** 9 - 66318474 * z ** 8 - 162512286 *\
z ** 7 - 162512286 * z ** 6 - 66318474 * z ** 5 -\
10187685 * z ** 4 - 478271 * z ** 3 - 4083 * z **\
2 - z) / (z ** 13 - 13 * z ** 12 + 78 * z ** 11 -\
286 * z ** 10 + 715 * z ** 9 - 1287 * z ** 8 + 1716 *\
z ** 7 - 1716 * z ** 6 + 1287 * z ** 5 - 715 * z **\
4 + 286 * z ** 3 - 78 * z ** 2 + 13 * z - 1)
if index == -13:
return (z ** 13 + 8178 * z ** 12 + 1479726 * z ** 11 +\
45533450 * z ** 10 + 423281535 * z ** 9 + 1505621508 *\
z ** 8 + 2275172004 * z ** 7 + 1505621508 * z ** 6 +\
423281535 * z ** 5 + 45533450 * z ** 4 + 1479726 *\
z ** 3 + 8178 * z ** 2 + z) / (z ** 14 - 14 * z ** 13 +\
91 * z ** 12 - 364 * z ** 11 + 1001 * z ** 10 - 2002 *\
z ** 9 + 3003 * z ** 8 - 3432 * z ** 7 + 3003 * z **\
6 - 2002 * z ** 5 + 1001 * z ** 4 - 364 * z ** 3 +\
91 * z ** 2 - 14 * z + 1)
if index == -14:
return (-z ** 14 - 16369 * z ** 13 - 4537314 * z ** 12 -\
198410786 * z ** 11 - 2571742175 * z ** 10 -\
12843262863 * z ** 9 - 27971176092 * z ** 8 -\
27971176092 * z ** 7 - 12843262863 * z ** 6 -\
2571742175 * z ** 5 - 198410786 * z ** 4 - 4537314 *\
z ** 3 - 16369 * z ** 2 - z) / (z ** 15 - 15 * z **\
14 + 105 * z ** 13 - 455 * z ** 12 + 1365 * z ** 11 -\
3003 * z ** 10 + 5005 * z ** 9 - 6435 * z ** 8 +\
6435 * z ** 7 - 5005 * z ** 6 + 3003 * z ** 5 -\
1365 * z ** 4 + 455 * z ** 3 - 105 * z ** 2 + 15 *\
z - 1)
if index == -15:
return (z ** 15 + 32752 * z ** 14 + 13824739 * z ** 13 +\
848090912 * z ** 12 + 15041229521 * z ** 11 +\
102776998928 * z ** 10 + 311387598411 * z ** 9 +\
447538817472 * z ** 8 + 311387598411 * z ** 7 +\
102776998928 * z ** 6 + 15041229521 * z ** 5 +\
848090912 * z ** 4 + 13824739 * z ** 3 + 32752 * z **\
2 + z) / (z ** 16 - 16 * z ** 15 + 120 * z ** 14 -\
560 * z ** 13 + 1820 * z ** 12 - 4368 * z ** 11 +\
8008 * z ** 10 - 11440 * z ** 9 + 12870 * z ** 8 -\
11440 * z ** 7 + 8008 * z ** 6 - 4368 * z ** 5 +\
1820 * z ** 4 - 560 * z ** 3 + 120 * z ** 2 -\
16 * z + 1)
if index == -16:
return (-z ** 16 - 65519 * z ** 15 - 41932745 * z ** 14 -\
3572085255 * z ** 13 - 85383238549 * z ** 12 -\
782115518299 * z ** 11 - 3207483178157 * z ** 10 -\
6382798925475 * z ** 9 - 6382798925475 * z ** 8 -\
3207483178157 * z ** 7 - 782115518299 * z ** 6 -\
85383238549 * z ** 5 - 3572085255 * z ** 4 -\
41932745 * z ** 3 - 65519 * z ** 2 - z) / (z **\
17 - 17 * z ** 16 + 136 * z ** 15 - 680 * z **\
14 + 2380 * z ** 13 - 6188 * z ** 12 + 12376 *\
z ** 11 - 19448 * z ** 10 + 24310 * z ** 9 - 24310 *\
z ** 8 + 19448 * z ** 7 - 12376 * z ** 6 + 6188 * z **\
5 - 2380 * z ** 4 + 680 * z ** 3 - 136 * z ** 2 +\
17 * z - 1)
if index == -17:
return (z ** 17 + 131054 * z ** 16 + 126781020 * z ** 15 +\
14875399450 * z ** 14 + 473353301060 * z ** 13 +\
5717291972382 * z ** 12 + 31055652948388 * z ** 11 +\
83137223185370 * z ** 10 + 114890380658550 * z ** 9 +\
83137223185370 * z ** 8 + 31055652948388 * z ** 7 +\
5717291972382 * z ** 6 + 473353301060 * z ** 5 +\
14875399450 * z ** 4 + 126781020 * z ** 3 + 131054 *\
z ** 2 + z) / (z ** 18 - 18 * z ** 17 + 153 * z **\
16 - 816 * z ** 15 + 3060 * z ** 14 - 8568 * z **\
13 + 18564 * z ** 12 - 31824 * z ** 11 + 43758 *\
z ** 10 - 48620 * z ** 9 + 43758 * z ** 8 - 31824 *\
z ** 7 + 18564 * z ** 6 - 8568 * z ** 5 + 3060 *\
z ** 4 - 816 * z ** 3 + 153 * z ** 2 - 18 * z + 1)
if index == -18:
return (-z ** 18 - 262125 * z ** 17 - 382439924 * z ** 16 -\
61403313100 * z ** 15 - 2575022097600 * z ** 14 -\
40457344748072 * z ** 13 - 285997074307300 * z ** 12 -\
1006709967915228 * z ** 11 - 1865385657780650 * z **\
10 - 1865385657780650 * z ** 9 - 1006709967915228 *\
z ** 8 - 285997074307300 * z ** 7 - 40457344748072 *\
z ** 6 - 2575022097600 * z ** 5 - 61403313100 * z **\
4 - 382439924 * z ** 3 - 262125 * z ** 2 - z) / (z **\
19 - 19 * z ** 18 + 171 * z ** 17 - 969 * z ** 16 +\
3876 * z ** 15 - 11628 * z ** 14 + 27132 * z ** 13 -\
50388 * z ** 12 + 75582 * z ** 11 - 92378 * z ** 10 +\
92378 * z ** 9 - 75582 * z ** 8 + 50388 * z ** 7 -\
27132 * z ** 6 + 11628 * z ** 5 - 3876 * z ** 4 +\
969 * z ** 3 - 171 * z ** 2 + 19 * z - 1)
if index == -19:
return (z ** 19 + 524268 * z ** 18 + 1151775897 * z ** 17 +\
251732291184 * z ** 16 + 13796160184500 * z ** 15 +\
278794377854832 * z ** 14 + 2527925001876036 * z **\
13 + 11485644635009424 * z ** 12 + 27862280567093358 *\
z ** 11 + 37307713155613000 * z ** 10 +\
27862280567093358 * z ** 9 + 11485644635009424 * z **\
8 + 2527925001876036 * z ** 7 + 278794377854832 * z **\
6 + 13796160184500 * z ** 5 + 251732291184 * z ** 4 +\
1151775897 * z ** 3 + 524268 * z ** 2 + z) / (z **\
20 - 20 * z ** 19 + 190 * z ** 18 - 1140 * z ** 17 +\
4845 * z ** 16 - 15504 * z ** 15 + 38760 * z ** 14 -\
77520 * z ** 13 + 125970 * z ** 12 - 167960 * z **\
11 + 184756 * z ** 10 - 167960 * z ** 9 + 125970 *\
z ** 8 - 77520 * z ** 7 + 38760 * z ** 6 - 15504 *\
z ** 5 + 4845 * z ** 4 - 1140 * z ** 3 + 190 * z **\
2 - 20 * z + 1)
def feynman_loop_number(self, diagram):
"""
Calculate the Feynman Loop Number of a Diagram.
The Feynman loop number of a connected looped multigraph is the
number of 1-valent and 2-valent vertices plus the number of
independent loops (edges - vertices + 1).
"""
if diagram.num_edges() == 0:
return 0
return diagram.degree().count(1) + diagram.degree().count(2) +\
diagram.num_edges() - diagram.num_verts() + 1
def symmetry_factor(self, diag):
"""
Calculate the symmetry factor of a diagram.
The returned factor is 1 divided by the product of: the order of
the group of vertex permutations preserving edges, k! for each
k-fold multiedge, and 2 for each self-loop.
"""
symfactor = diag.automorphism_group().cardinality()
for foo in diag.vertices():
for bar in range(foo, diag.num_verts()):
conecs = diag.adjacency_matrix()[foo][bar]
symfactor = kronecker_delta(foo, bar) * symfactor * 2 **\
conecs * factorial(conecs) + (1 - kronecker_delta(\
foo, bar)) * symfactor * factorial(conecs)
return QQ(1) / symfactor
def bernoulli_plus_half(self, m):
"""Return bernoulli number with convention B1=+1/2."""
return bernoulli(m) * (-1) ** m
def polylogarithm(self, index, z):
"""Give the nth polylogarithm evaluated at z."""
_ = gp.set_precision(self.prec)
if (index, z) in self.prev:
return self.prev[(index, z)]
if index <= 1 and index >= -19:
tmp = self.CC(self.pre_comp_polylog(index, z))
self.prev[(index, z)] = tmp
if len(self.prev) > 1000:
self.prev.popitem(last=False)
return tmp
return self.CC(gp.subst(gp(polylog(index, x)), x, z))
def gamma(self, eye, kay, ell):
"""Return the gamma equation for vertex_factor_tensor."""
if kay == 0:
return sum([self.polylogarithm(2 - self.n, 1 / z) for z \
in self.zees]) * self.bernoulli_plus_half(self.n) /\
factorial(self.n) + kronecker_delta(self.n, 2) * \
(self.f * self.B.inverse() * self.A *\
self.f / 8)[0][0]
return (-1) ** kay * sum([h ** (bar - 1) / factorial(bar) *
self.bernoulli_plus_half(bar) * self.polylogarithm(\
2 - bar - kay, 1 / self.zees[eye]) for bar in \
range((kronecker_delta(kay, 1) + kronecker_delta(kay, 2)),\
1 + (kronecker_delta(kay, 1) + kronecker_delta(kay, 2)) + \
self.n - ell)]) - kronecker_delta(kay, 1) * self.CC(0.5) * \
(self.B.inverse() * self.nu)[eye]
def vertex_factor_tensor(self):
"""
Generate the vertex gamma factors as a nested tensor for indexed access.
Output is in the form
vertexgamma[feynman_loop_number][vertex_degree][ith_shape_parameter]
"""
var('h')
return [[[self.gamma(eye, kay, ell) for eye in
range(len(self.zees))] for kay in range(2 * self.n + 1)]\
for ell in range(self.n + 1)]
def diagram_contribution_to_nloop(self, diagram):
"""The diagram contribution to the n-loop invariant."""
N = len(self.zees)
hamil = -self.B.inverse() * self.A + diagonal_matrix([1 / (1 - z)\
for z in self.zees])
prop = h * hamil.inverse()
temp_sum = self.CC(0)
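# Enumerate every assignment of a shape index 0..N-1 to each vertex of the
# diagram by reading foo as a base-N counter with num_verts() digits.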
for foo in range(N ** diagram.num_verts()):
indices = [floor(foo / (N ** bar)) % N for bar in
range(diagram.num_verts())]
temp_sum += prod([prop[indices[eee[0]]]
[indices[eee[1]]] for eee in diagram.edges
(labels=False)] +
[self.ver_factor[self.feynman_loop_number(diagram)]
[diagram.degree()[vee]][indices[vee]] for vee in
diagram.vertices()]).expand()
return self.symmetry_factor(diagram) *\
temp_sum.coeff(h, self.n - 1)
def nloop_invariant(self):
"""The Dimofte-Garoufalidis n-loop invariant."""
self.ver_factor = self.vertex_factor_tensor()
PROCESSES = multiprocessing.cpu_count()
#print 'cpu_count() = %d\n' % multiprocessing.cpu_count()
pool = multiprocessing.Pool(PROCESSES)
collect_results = pool.map(self.diagram_contribution_to_nloop,\
self.diagrams)
loop_invar = sum(collect_results) + self.ver_factor[self.n][0][0]
return loop_invar
def nloop_from_manifold(manifold, n, diagrams, engine=None, verbose=False,\
file_name=None):
D = NeumannZagierDatum(manifold, engine, verbose, file_name)
D.generate_nz_data()
E = nloop(D.nz, n, diagrams)
if n == 1:
return [E.one_loop(), D.nz]
return [E.nloop_invariant(), D.nz]
def nloop_from_nzdatum(nz, n, diagrams):
E = nloop(nz, n, diagrams)
if n == 1:
return [E.one_loop(), nz]
return [E.nloop_invariant(), nz]
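# Usage sketch: a minimal, hedged example of driving the pipeline above from
# a Sage session with SnapPy available. The diagram list must be supplied
# externally; the name `my_diagrams`, the .sobj file name, and the choice of
# engine="magma" are illustrative assumptions, not part of this module.
#
#   M = Manifold("4_1")                               # figure-eight knot
#   my_diagrams = load("diagrams_up_to_2_loops.sobj") # hypothetical file
#   invariant, nz = nloop_from_manifold(M, 2, my_diagrams,
#                                       engine="magma", verbose=False)
#   print(invariant)
#
#   # The Neumann-Zagier datum can be reused without recomputing it:
#   invariant_again, _ = nloop_from_nzdatum(nz, 2, my_diagrams)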
|
|
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from nova.api.openstack.compute.contrib import flavormanage
from nova.compute import flavors
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_flavor_by_flavor_id(flavorid, read_deleted='yes'):
if flavorid == 'failtest':
raise exception.NotFound("Not found sucka!")
elif str(flavorid) != '1234':
raise Exception("This test expects flavorid 1234, not %s" % flavorid)
if read_deleted != 'no':
raise test.TestingException("Should not be reading deleted")
return {
'root_gb': 1,
'ephemeral_gb': 1,
'name': u'frob',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
'updated_at': None,
'memory_mb': 256,
'vcpus': 1,
'flavorid': flavorid,
'swap': 0,
'rxtx_factor': 1.0,
'extra_specs': {},
'deleted_at': None,
'vcpu_weight': None,
'id': 7,
'is_public': True,
'disabled': False,
}
def fake_destroy(flavorname):
pass
def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
flavorid, swap, rxtx_factor, is_public):
if flavorid is None:
flavorid = 1234
newflavor = fake_get_flavor_by_flavor_id(flavorid,
read_deleted="no")
newflavor["name"] = name
newflavor["memory_mb"] = int(memory_mb)
newflavor["vcpus"] = int(vcpus)
newflavor["root_gb"] = int(root_gb)
newflavor["ephemeral_gb"] = int(ephemeral_gb)
newflavor["swap"] = swap
newflavor["rxtx_factor"] = float(rxtx_factor)
newflavor["is_public"] = bool(is_public)
return newflavor
class FlavorManageTest(test.TestCase):
def setUp(self):
super(FlavorManageTest, self).setUp()
self.stubs.Set(flavors,
"get_flavor_by_flavor_id",
fake_get_flavor_by_flavor_id)
self.stubs.Set(flavors, "destroy", fake_destroy)
self.stubs.Set(flavors, "create", fake_create)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
self.controller = flavormanage.FlavorManageController()
self.app = fakes.wsgi_app(init_only=('flavors',))
def test_delete(self):
req = fakes.HTTPRequest.blank('/v2/123/flavors/1234')
res = self.controller._delete(req, 1234)
self.assertEqual(res.status_int, 202)
# deleting a nonexistent flavor should raise 404
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._delete, req, "failtest")
def test_create(self):
expected = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": 1234,
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dumps(expected)
res = req.get_response(self.app)
body = jsonutils.loads(res.body)
for key in expected["flavor"]:
self.assertEquals(body["flavor"][key], expected["flavor"][key])
def test_create_public_default(self):
flavor = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": 1234,
"swap": 512,
"rxtx_factor": 1,
}
}
expected = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": 1234,
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
self.stubs.Set(flavors, "create", fake_create)
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dumps(flavor)
res = req.get_response(self.app)
body = jsonutils.loads(res.body)
for key in expected["flavor"]:
self.assertEquals(body["flavor"][key], expected["flavor"][key])
def test_create_without_flavorid(self):
expected = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dumps(expected)
res = req.get_response(self.app)
body = jsonutils.loads(res.body)
for key in expected["flavor"]:
self.assertEquals(body["flavor"][key], expected["flavor"][key])
def test_flavor_exists_exception_returns_409(self):
expected = {
"flavor": {
"name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"id": 1235,
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
flavorid, swap, rxtx_factor, is_public):
raise exception.InstanceTypeExists(name=name)
self.stubs.Set(flavors, "create", fake_create)
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
req.body = jsonutils.dumps(expected)
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
def test_invalid_memory_mb(self):
"""Check negative and decimal number can't be accepted."""
self.stubs.UnsetAll()
self.assertRaises(exception.InvalidInput, flavors.create, "abc",
-512, 2, 1, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
512.2, 2, 1, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
None, 2, 1, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
512, 2, None, 1, 1234, 512, 1, True)
self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
"test_memory_mb", 2, None, 1, 1234, 512, 1, True)
|
|
# -*- coding: utf-8 -*-
# OpenGL example code - Perspective
# set up a perspective projection and render a rotating cube
import ctypes
import numpy as np
from OpenGL.GL import *
from OpenGL.GL import shaders
from glfw import *
import glm
class Window(object):
def __init__(self, width=640, height=480, title='GLFW opengl window'):
self.width = width
self.height = height
self.title = title
self.window = None
self.__vertexShader = './shaders/%s.vert' % self.title
self.__fragmentShader = './shaders/%s.frag' % self.title
self.__shaderProgram = None
self.__vao = None
self.__vpLocation = None
def shaderFromFile(self, shaderType, shaderFile):
"""read shader from file and compile it"""
shaderSrc = ''
with open(shaderFile) as sf:
shaderSrc = sf.read()
return shaders.compileShader(shaderSrc, shaderType)
def initGL(self):
"""opengl initialization"""
# load shaders
vertexShader = self.shaderFromFile(GL_VERTEX_SHADER, self.__vertexShader)
fragmentShader = self.shaderFromFile(GL_FRAGMENT_SHADER, self.__fragmentShader)
self.__shaderProgram = shaders.compileProgram(vertexShader, fragmentShader)
if not self.__shaderProgram:
self.close()
self.__vpLocation = glGetUniformLocation(self.__shaderProgram, 'ViewProjection')
# generate and bind the vao
self.__vao = glGenVertexArrays(1)
glBindVertexArray(self.__vao)
# generate and bind the buffer object
vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
# vertex data for the cube: position plus a per-face color
vertexData = np.array([
# x y z r g b
# face 0:
1.0, 1.0, 1.0, 1.0, 0.0, 0.0, # vertex 0
-1.0, 1.0, 1.0, 1.0, 0.0, 0.0, # vertex 1
1.0,-1.0, 1.0, 1.0, 0.0, 0.0, # vertex 2
-1.0,-1.0, 1.0, 1.0, 0.0, 0.0, # vertex 3
# face 1:
1.0, 1.0, 1.0, 0.0, 1.0, 0.0, # vertex 0
1.0,-1.0, 1.0, 0.0, 1.0, 0.0, # vertex 1
1.0, 1.0,-1.0, 0.0, 1.0, 0.0, # vertex 2
1.0,-1.0,-1.0, 0.0, 1.0, 0.0, # vertex 3
# face 2:
1.0, 1.0, 1.0, 0.0, 0.0, 1.0, # vertex 0
1.0, 1.0,-1.0, 0.0, 0.0, 1.0, # vertex 1
-1.0, 1.0, 1.0, 0.0, 0.0, 1.0, # vertex 2
-1.0, 1.0,-1.0, 0.0, 0.0, 1.0, # vertex 3
# face 3:
1.0, 1.0,-1.0, 1.0, 1.0, 0.0, # vertex 0
1.0,-1.0,-1.0, 1.0, 1.0, 0.0, # vertex 1
-1.0, 1.0,-1.0, 1.0, 1.0, 0.0, # vertex 2
-1.0,-1.0,-1.0, 1.0, 1.0, 0.0, # vertex 3
# face 4:
-1.0, 1.0, 1.0, 0.0, 1.0, 1.0, # vertex 0
-1.0, 1.0,-1.0, 0.0, 1.0, 1.0, # vertex 1
-1.0,-1.0, 1.0, 0.0, 1.0, 1.0, # vertex 2
-1.0,-1.0,-1.0, 0.0, 1.0, 1.0, # vertex 3
# face 5:
1.0,-1.0, 1.0, 1.0, 0.0, 1.0, # vertex 0
-1.0,-1.0, 1.0, 1.0, 0.0, 1.0, # vertex 1
1.0,-1.0,-1.0, 1.0, 0.0, 1.0, # vertex 2
-1.0,-1.0,-1.0, 1.0, 0.0, 1.0, # vertex 3
], dtype=np.float32)
# fill with data
glBufferData(GL_ARRAY_BUFFER, vertexData.nbytes, vertexData, GL_STATIC_DRAW)
# set up generic attrib pointers
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * 4, None)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * 4, ctypes.c_void_p(3 * 4))
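# generate and bind the index buffer object; the GL_ELEMENT_ARRAY_BUFFER
# binding is stored in the currently bound VAO, so it is restored whenever
# the VAO is bound again in renderGL()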
ibo = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
indexData = np.array([
# face 0:
0, 1, 2, # first triangle
2, 1, 3, # second triangle
# face 1:
4, 5, 6, # first triangle
6, 5, 7, # second triangle
# face 2:
8, 9, 10, # first triangle
10, 9, 11, # second triangle
# face 3:
12, 13, 14, # first triangle
14, 13, 15, # second triangle
# face 4:
16, 17, 18, # first triangle
18, 17, 19, # second triangle
# face 5:
20, 21, 22, # first triangle
22, 21, 23, # second triangle
], dtype=np.uint32)  # 32-bit indices to match GL_UNSIGNED_INT in glDrawElements
# fill with data
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexData.nbytes, indexData, GL_STATIC_DRAW)
glBindVertexArray(0)
# we are drawing 3d objects so we want depth testing
glEnable(GL_DEPTH_TEST)
def renderGL(self):
"""opengl render method"""
# get the time in seconds
t = glfwGetTime()
# clear first
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# use the shader program
glUseProgram(self.__shaderProgram)
# calculate ViewProjection matrix
projection = glm.perspective(90.0, 4.0 / 3.0, .1, 100.0)
# translate the world/view position
view = glm.translate(glm.mat4(1.0), glm.vec3(0.0, 0.0, -5.0))
# make the camera rotate around the origin
view = glm.rotate(view, 90.0 * t, glm.vec3(1.0, 1.0, 1.0))
viewProjection = np.array(projection * view, dtype=np.float32)
# set the uniform
glUniformMatrix4fv(self.__vpLocation, 1, GL_FALSE, viewProjection)
# bind the vao
glBindVertexArray(self.__vao)
# draw
glDrawElements(GL_TRIANGLES, 6*6, GL_UNSIGNED_INT, None)
glBindVertexArray(0)
glUseProgram(0)
def initWindow(self):
"""setup window options. etc, opengl version"""
# select opengl version
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE)
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3)
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3)
def show(self):
"""create the window and show it"""
self.initWindow()
self.window = glfwCreateWindow(self.width, self.height, self.title, 0, 0)
if self.window == 0:
glfwTerminate()
raise Exception('failed to open window')
glfwMakeContextCurrent(self.window)
# initialize opengl
self.initGL()
while not glfwWindowShouldClose(self.window):
glfwPollEvents()
self.renderGL()
# check for errors
error = glGetError()
if error != GL_NO_ERROR:
raise Exception(error)
# finally swap buffers
glfwSwapBuffers(self.window)
self.close()
def close(self):
glfwDestroyWindow(self.window)
glfwTerminate()
if __name__ == '__main__':
import os.path
if glfwInit() == GL_FALSE:
raise Exception('failed to init GLFW')
title = os.path.basename(__file__)
win = Window(title=title[:-3])
win.show()
|
|
"""Tests for auto_ptr feature"""
from unittest import mock
from django.contrib.auth import get_user_model
from django.test import TestCase
from powerdns.models import Domain, Record, get_ptr_obj
from .utils import (
DomainFactory,
DomainTemplateFactory,
RecordFactory,
RecordTemplateFactory,
assert_does_exist,
assert_not_exists,
)
from powerdns.utils import AutoPtrOptions
class TestAutoPtr(TestCase):
"""Tests for auto_ptr feature"""
def setUp(self):
self.user = get_user_model().objects.create_superuser(
'user', '[email protected]', 'password'
)
self.reverse_template = DomainTemplateFactory(name='reverse')
self.alt_reverse_template = DomainTemplateFactory(name='reverse 2')
self.soa_record = RecordTemplateFactory(
type='SOA',
name='{domain-name}',
content=(
'ns1.{domain-name} hostmaster.{domain-name} '
'0 43200 600 1209600 600'
),
domain_template=self.reverse_template,
)
self.alt_soa_record = RecordTemplateFactory(
type='SOA',
name='{domain-name}',
content=(
'nameserver1.{domain-name} hostmaster.{domain-name} '
'0 43200 1200 1209600 1200'
),
domain_template=self.alt_reverse_template,
)
self.ptr_domain = DomainFactory(
name='example.com',
template=None,
reverse_template=self.reverse_template,
type='NATIVE',
auto_ptr=AutoPtrOptions.ALWAYS,
)
self.ptr_if_domain = DomainFactory(
name='ptr-if-domain.com',
template=None,
reverse_template=self.reverse_template,
type='NATIVE',
auto_ptr=AutoPtrOptions.ONLY_IF_DOMAIN,
)
self.no_ptr_domain = DomainFactory(
name='no-ptr--domain.com',
template=None,
reverse_template=self.reverse_template,
type='NATIVE',
auto_ptr=AutoPtrOptions.NEVER,
)
def tearDown(self):
for Model in [Domain, Record, get_user_model()]:
Model.objects.all().delete()
def test_default_ptr_created(self):
"""A PTR record is created for an A record with default template"""
RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
owner=self.user,
)
domain = Domain.objects.get(name='1.168.192.in-addr.arpa')
self.assertEqual(domain.type, 'NATIVE')
self.assertTrue(domain.get_soa().content.endswith('600'))
assert_does_exist(
Record,
domain=domain,
name='1.1.168.192.in-addr.arpa',
owner=self.user,
)
def test_ptr_gets_service_from_record(self):
record = RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
owner=self.user,
)
ptr = get_ptr_obj(record.content, record.name)
self.assertNotEqual(record.service, None)
self.assertEqual(record.service, ptr.service)
def test_auto_ptr_edit(self):
"""PTR changes when A changes"""
record = RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
record.content = '192.168.1.9'
record.save()
domain = Domain.objects.get(name='1.168.192.in-addr.arpa')
assert_does_exist(
Record,
domain=domain,
name='9.1.168.192.in-addr.arpa',
)
assert_not_exists(
Record,
domain=domain,
name='1.1.168.192.in-addr.arpa',
)
def test_record_update_wont_create_auto_ptr_when_never_set(self):
record = RecordFactory(
domain=self.no_ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
self.assertEqual(
Record.objects.filter(name='1.1.168.192.in-addr.arpa').count(), 0
)
record.remarks = 'this change won\'t trigger ptr creation'
record.save()
self.assertEqual(
Record.objects.filter(name='1.1.168.192.in-addr.arpa').count(), 0
)
def test_auto_ptr_fields_get_update_when_record_is_changed(self):
record = RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
ttl=3600,
)
record.content = '192.168.1.9'
new_ttl = 7200
record.ttl = new_ttl
record.disabled = True
record.save()
ptr_record = Record.objects.get(
domain=Domain.objects.get(name='1.168.192.in-addr.arpa'),
name='9.1.168.192.in-addr.arpa',
)
self.assertTrue(record.ttl == ptr_record.ttl == new_ttl)
self.assertTrue(record.disabled == ptr_record.disabled is True)
def test_auto_ptr_off(self):
"""PTR is removed when setting auto_ptr to NEVER"""
RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
domain = Domain.objects.get(
name='1.168.192.in-addr.arpa'
)
self.ptr_domain.auto_ptr = AutoPtrOptions.NEVER
self.ptr_domain.save()
assert_not_exists(
Record,
domain=domain,
name='1.1.168.192.in-addr.arpa',
)
def test_default_ptr_never(self):
"""A PTR record is not created if auto_ptr set to NEVER"""
domain = DomainFactory(name='1.168.192.in-addr.arpa')
RecordFactory(
domain=self.no_ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
assert_not_exists(
Record,
domain=domain,
name='1.1.168.192.in-addr.arpa'
)
def test_ptr_domain_exists(self):
"""A PTR record with 'only-if-domain' is created if domain exists"""
domain = DomainFactory(name='1.168.192.in-addr.arpa')
RecordFactory(
domain=self.ptr_if_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
assert_does_exist(
Record,
domain=domain,
name='1.1.168.192.in-addr.arpa'
)
def test_ptr_domain_not_exists(self):
"""A PTR record with 'only-if-domain' is NOT created if domain
exists"""
RecordFactory(
domain=self.ptr_if_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
assert_not_exists(
Record,
name='1.1.168.192.in-addr.arpa'
)
def test_alt_ptr_created(self):
"""A PTR record is created for an A record with alternative"""
self.ptr_domain.reverse_template = self.alt_reverse_template
RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
domain = Domain.objects.get(name='1.168.192.in-addr.arpa')
self.assertTrue(domain.get_soa().content.endswith('1200'))
def test_ptr_autoremove(self):
"""A PTR record is automatically removed with its A record"""
a = RecordFactory(
domain=self.ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
assert_does_exist(Record, name='1.1.168.192.in-addr.arpa', type='PTR')
a.delete()
assert_not_exists(Record, name='1.1.168.192.in-addr.arpa', type='PTR')
def test_delete_auto_ptr_works_when_blank_name_and_content(self):
a = Record(
domain=self.ptr_domain,
type='A',
)
a.name = 'site.example.com'
a.content = '192.168.1.1'
a.save()
assert_does_exist(Record, name='1.1.168.192.in-addr.arpa', type='PTR')
a.delete()
assert_not_exists(Record, name='1.1.168.192.in-addr.arpa', type='PTR')
@mock.patch('powerdns.models.powerdns._update_records_ptrs')
def test_update_ptr_signal_is_fired_when_auto_ptr_is_changed(
self, update_records
):
RecordFactory(
domain=self.no_ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
self.no_ptr_domain.auto_ptr = AutoPtrOptions.ALWAYS
saves_counter = update_records.call_count
self.no_ptr_domain.save()
self.assertEqual(update_records.call_count, saves_counter + 1)
@mock.patch('powerdns.models.powerdns._update_records_ptrs')
def test_update_ptr_signal_is_skipped_when_auto_ptr_is_changed(
self, update_records
):
RecordFactory(
domain=self.no_ptr_domain,
type='A',
name='site.example.com',
content='192.168.1.1',
)
saves_counter = update_records.call_count
self.no_ptr_domain.save()
self.assertEqual(update_records.call_count, saves_counter)
# -------------------------------------------------------------------------
# IPv6
# -------------------------------------------------------------------------
def test_default_ptr_created_for_aaaa(self):
"""A PTR record is created for an AAAA record with default template"""
RecordFactory(
domain=self.ptr_domain,
type='AAAA',
name='site.example.com',
content='2001:0db8:0:0::1428:57ab',
owner=self.user,
)
domain = Domain.objects.get(
name='a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa', # noqa
)
self.assertEqual(domain.type, 'NATIVE')
self.assertTrue(domain.get_soa().content.endswith('600'))
assert_does_exist(
Record,
domain=domain,
name='b.a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa', # noqa
owner=self.user,
)
def test_auto_ptr_edit_for_aaaa(self):
"""PTR changes when AAAA changes"""
record = RecordFactory(
domain=self.ptr_domain,
type='AAAA',
name='site.example.com',
content='2001:0db8:0:0::1428:57ab',
)
record.content = '2001:0db9:0:0::1428:57ab'
record.save()
old_domain = Domain.objects.get(
name='a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa', # noqa
)
new_domain = Domain.objects.get(
name='a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.9.b.d.0.1.0.0.2.ip6.arpa', # noqa
)
assert_does_exist(
Record,
domain=new_domain,
name='b.a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.9.b.d.0.1.0.0.2.ip6.arpa', # noqa
type='PTR',
)
assert_not_exists(
Record,
domain=old_domain,
name='b.a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa', # noqa
type='PTR',
)
def test_ptr_autoremove_for_aaaa(self):
"""
A PTR record is automatically removed when its AAAA record is removed
"""
a = RecordFactory(
domain=self.ptr_domain,
type='AAAA',
name='site.example.com',
content='2001:0db8:0:0::1428:57ab',
)
assert_does_exist(
Record,
name='b.a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa', # noqa
type='PTR'
)
a.delete()
assert_not_exists(
Record,
name='b.a.7.5.8.2.4.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa', # noqa
type='PTR'
)
|
|
"""
Test the importing module.
This module relies heavily on an external service and requires
quite a bit of mocking.
"""
import os
import pytest
from django.conf import settings
from django.test import TestCase, override_settings
from mock import patch
from molo.core.api import importers
from molo.core.api.errors import (
RecordOverwriteError,
ReferenceUnimportedContent,
ImportedContentInvalid,
)
from molo.core.api.tests import constants, utils
from molo.core.models import (
ArticlePage,
SectionPage,
ArticlePageRecommendedSections,
ArticlePageRelatedSections,
SiteLanguageRelation,
ArticlePageTags,
SectionPageTags,
BannerPage,
get_translation_for
)
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.utils import get_image_hash
import responses
from wagtail.core.models import Site
from wagtail.images.tests.utils import Image, get_test_image_file
class ArticleImportTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.importer = importers.ArticlePageImporter(
base_url="http://localhost:8000",
content=constants.AVAILABLE_ARTICLES["items"]
)
def test_article_importer_initializtion(self):
self.assertEqual(
self.importer.content(),
constants.AVAILABLE_ARTICLES["items"]
)
@patch("molo.core.api.importers.get_image")
def test_articles_can_be_saved(self, mock_image):
image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
mock_image.return_value = image
# Create parent page to which articles will be saved
section = self.mk_section(
self.section_index, title="Parent Test Section 2",
)
self.assertEqual(ArticlePage.objects.all().count(), 0)
# Save the articles
# Save the first available article
# import pdb;pdb.set_trace()
self.importer.save([0, ], section.id)
self.assertEqual(ArticlePage.objects.all().count(), 1)
def test_nested_fields_can_be_extracted(self):
# It is necessary to separate nested fields in each article
# dictionary from those that are not nested. Reason being
# some of the nested fields have to be treated differently.
pass
@patch("molo.core.api.importers.get_image")
def test_related_image_can_be_retrieved(self, mock_image):
mock_image.return_value = constants.RELATED_IMAGE
# assert 0
# self.assertIsInstance(
# importers.get_image(base_url=self.importer.base_url, image_id=1),
# Image
# )
def tearDown(self):
del self.importer
class SectionImportTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.importer = importers.SectionPageImporter(
base_url="http://localhost:8000",
content=constants.AVAILABLE_SECTIONS["items"]
)
def test_section_importer_initializtion(self):
self.assertEqual(
self.importer.content(),
constants.AVAILABLE_SECTIONS["items"]
)
def tearDown(self):
del self.importer
# @patch("molo.core.api.importers.get_image")
# def test_section_can_be_saved(self, mock_image):
# image = Image.objects.create(
# title="Test image",
# file=get_test_image_file(),
# )
# mock_image.return_value = image
#
# # Add new sections as children of the SectionIndexPage
# self.assertEqual(SectionPage.objects.all().count(), 0)
#
# # Save the articles
# # Save the first available article
# self.importer.save([0, ], self.section_index.id)
# self.assertEqual(SectionPage.objects.all().count(), 1)
class TestImporterUtilFunctions(TestCase):
def setUp(self):
self.test_url = "http://localhost:8000/api/v2/images/"
@responses.activate
def test_list_of_objects_from_api(self):
responses.add(responses.GET,
self.test_url,
json=constants.WAGTAIL_API_LIST_VIEW, status=200)
returned_list = importers.list_of_objects_from_api(self.test_url)
self.assertEqual(
returned_list,
constants.WAGTAIL_API_LIST_VIEW["items"])
@patch("molo.core.api.importers.requests.get",
side_effect=utils.mocked_requests_get)
def test_list_of_objects_from_api_paginated(self, mock_get):
responses.add(responses.GET,
self.test_url,
json=constants.WAGTAIL_API_LIST_VIEW_PAGE_1, status=200)
responses.add(responses.GET,
"{}?limit=20&offset=20".format(self.test_url),
json=constants.WAGTAIL_API_LIST_VIEW_PAGE_2, status=200)
returned_list = importers.list_of_objects_from_api(self.test_url)
expected_response = (
constants.WAGTAIL_API_LIST_VIEW_PAGE_1["items"] +
constants.WAGTAIL_API_LIST_VIEW_PAGE_2["items"]
)
self.assertEqual(
returned_list,
expected_response)
class TestBaseImporter(MoloTestCaseMixin, TestCase):
def setUp(self):
self.fake_base_url = "http://localhost:8000"
self.mk_main()
self.importer = importers.BaseImporter(self.site.pk,
self.fake_base_url)
def test_format_base_url(self):
self.fake_base_url = "http://localhost:8000/"
base_importer = importers.BaseImporter(self.site.pk,
self.fake_base_url)
self.assertEqual(base_importer.base_url, self.fake_base_url[:-1])
@override_settings(MEDIA_ROOT=os.path.join(settings.PROJECT_ROOT, 'media'))
@override_settings(
DEFAULT_FILE_STORAGE='django.core.files.storage.FileSystemStorage')
class TestImageImporter(MoloTestCaseMixin, TestCase):
def setUp(self):
self.fake_base_url = "http://localhost:8000"
self.mk_main()
self.record_keeper = importers.RecordKeeper()
self.importer = importers.ImageImporter(
self.site.pk, self.fake_base_url,
record_keeper=self.record_keeper)
def test_image_importer_init(self):
self.assertEqual(self.importer.image_url,
"http://localhost:8000/api/v2/images/")
def test_get_image_details(self):
local_image = Image.objects.create(
title='local image',
file=get_test_image_file(),
)
local_image_hash = get_image_hash(local_image)
importer = importers.ImageImporter(self.site.pk,
self.fake_base_url)
self.assertEqual(importer.image_hashes[local_image_hash], local_image)
def test_get_replica_image_returns_match(self):
local_image = Image.objects.create(
title='local image',
file=get_test_image_file(),
)
local_image_hash = get_image_hash(local_image)
self.assertEqual(Image.objects.count(), 1)
self.importer.get_image_details()
replica_image = self.importer.get_replica_image(local_image_hash)
self.assertEqual(replica_image, local_image)
def test_get_replica_image_returns_none(self):
Image.objects.create(
title='local image',
file=get_test_image_file(),
)
self.importer.get_image_details()
replica_image = self.importer.get_replica_image('wrong_hash')
self.assertTrue(replica_image is None)
@responses.activate
def test_fetch_and_create_image_new_image(self):
image_title = "test_title.png"
url = "{}/media/images/SIbomiWV1AQ.original.jpg".format(
self.fake_base_url)
test_file_path = os.getcwd() + '/molo/core/api/tests/test_image.png'
with open(test_file_path, 'rb') as img1:
responses.add(
responses.GET, url,
body=img1.read(), status=200,
content_type='image/jpeg',
stream=True
)
result, context = self.importer.fetch_and_create_image(
url,
image_title)
self.assertEqual(type(result), Image)
self.assertEqual(result.title, image_title)
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(
context["file_url"],
url)
@responses.activate
def test_import_image_raise_exception(self):
image_url = '{}/api/v2/images/'.format(self.fake_base_url)
image_detail_url_1 = "{}{}/".format(image_url,
constants.IMAGE_DETAIL_1["id"])
responses.add(
responses.GET, image_detail_url_1,
json=constants.IMAGE_DETAIL_1_NO_HASH, status=200)
with pytest.raises(ValueError) as exception_info:
self.importer.import_image(
constants.IMAGE_LIST_RESPONSE["items"][0]["id"])
self.assertEqual(
exception_info.value.__str__(),
'image hash should not be none')
@responses.activate
@patch("molo.core.api.importers.ImageImporter.fetch_and_create_image",
side_effect=utils.mocked_fetch_and_create_image)
def test_import_image(self, mock_fetch_and_create_image):
image_url = '{}/api/v2/images/'.format(self.fake_base_url)
foreign_image_id = constants.IMAGE_DETAIL_2["id"]
image_detail_url_2 = "{}{}/".format(image_url, foreign_image_id)
image_file_location = constants.IMAGE_DETAIL_2["image_url"]
responses.add(
responses.GET, image_detail_url_2,
json=constants.IMAGE_DETAIL_2, status=200)
self.assertEqual(Image.objects.count(), 0)
new_image, context = self.importer.import_image(
constants.IMAGE_DETAIL_2["id"])
self.assertEqual(Image.objects.count(), 1)
self.assertEqual(
new_image.title,
constants.IMAGE_DETAIL_2["title"])
# Check returned context
self.assertFalse(context["local_version_existed"])
# check that record has been created
self.assertEqual(
context['file_url'],
"{}{}".format(self.fake_base_url, image_file_location))
@responses.activate
def test_import_image_avoid_duplicates(self):
image_url = '{}/api/v2/images/'.format(self.fake_base_url)
foreign_image_id = constants.IMAGE_DETAIL_1["id"]
image_detail_url_1 = "{}{}/".format(image_url, foreign_image_id)
image_file_location = constants.IMAGE_DETAIL_1["image_url"]
responses.add(
responses.GET, image_detail_url_1,
json=constants.IMAGE_DETAIL_1, status=200)
# create 'duplicate' image with same name
Image.objects.create(
title='local image',
file=get_test_image_file(),
)
# NOTE: images must be re-referenced once added
self.importer.get_image_details()
self.assertEqual(Image.objects.count(), 1)
local_image, context = self.importer.import_image(
constants.IMAGE_DETAIL_1["id"])
self.assertEqual(Image.objects.count(), 1)
# Check context
self.assertTrue(context["local_version_existed"])
self.assertEqual(
context["file_url"],
"{}{}".format(self.fake_base_url, image_file_location))
self.assertEqual(
context["foreign_title"],
constants.IMAGE_DETAIL_1["title"])
# check logs
self.assertEqual(
self.record_keeper.get_local_image(foreign_image_id),
local_image.id)
@responses.activate
@patch("molo.core.api.importers.ImageImporter.fetch_and_create_image",
side_effect=utils.mocked_fetch_and_create_image)
def test_import_images(self, mock_fetch_and_create_image):
image_list_url = '{}/api/v2/images/'.format(self.fake_base_url)
image_detail_url_1 = "{}{}/".format(image_list_url,
constants.IMAGE_DETAIL_1["id"])
image_detail_url_2 = "{}{}/".format(image_list_url,
constants.IMAGE_DETAIL_2["id"])
responses.add(
responses.GET, image_list_url,
json=constants.IMAGE_LIST_RESPONSE, status=200)
responses.add(
responses.GET, image_detail_url_1,
json=constants.IMAGE_DETAIL_1, status=200)
responses.add(
responses.GET, image_detail_url_2,
json=constants.IMAGE_DETAIL_2, status=200)
self.assertEqual(Image.objects.count(), 0)
self.importer.import_images()
self.assertEqual(Image.objects.count(), 2)
self.assertEqual(
Image.objects.first().title,
constants.IMAGE_DETAIL_1["title"])
self.assertEqual(
Image.objects.last().title,
constants.IMAGE_DETAIL_2["title"])
# check logs
self.assertEqual(
self.record_keeper.get_local_image(constants.IMAGE_DETAIL_1["id"]),
Image.objects.first().id)
self.assertEqual(
self.record_keeper.get_local_image(constants.IMAGE_DETAIL_2["id"]),
Image.objects.last().id)
class TestLanguageImporter(MoloTestCaseMixin, TestCase):
def setUp(self):
self.fake_base_url = "http://localhost:8000"
self.mk_main()
self.importer = importers.LanguageImporter(self.site.pk,
self.fake_base_url)
def test_language_importer_init(self):
self.assertEqual(self.importer.language_url,
"http://localhost:8000/api/v2/languages/")
@responses.activate
def test_get_language_ids(self):
responses.add(responses.GET,
"{}/api/v2/languages/".format(self.fake_base_url),
json=constants.LANGUAGE_LIST_RESPONSE, status=200)
self.assertEqual(
self.importer.get_language_ids(),
[constants.LANGUAGE_LIST_RESPONSE["items"][0]["id"],
constants.LANGUAGE_LIST_RESPONSE["items"][1]["id"]])
@responses.activate
def test_copy_site_languages(self):
responses.add(responses.GET,
"{}/api/v2/languages/".format(self.fake_base_url),
json=constants.LANGUAGE_LIST_RESPONSE, status=200)
responses.add(responses.GET,
"{}/api/v2/languages/1/".format(self.fake_base_url),
json=constants.LANGUAGE_RESPONSE_1, status=200)
responses.add(responses.GET,
"{}/api/v2/languages/2/".format(self.fake_base_url),
json=constants.LANGUAGE_RESPONSE_2, status=200)
self.importer.copy_site_languages()
eng_lang = SiteLanguageRelation.objects.get(locale="en")
fr_lang = SiteLanguageRelation.objects.get(locale="fr")
self.assertTrue(eng_lang)
self.assertTrue(eng_lang.is_active)
self.assertTrue(eng_lang.is_main_language)
self.assertTrue(fr_lang)
self.assertTrue(fr_lang.is_active)
self.assertFalse(fr_lang.is_main_language)
class TestContentImporter(TestCase, MoloTestCaseMixin):
def setUp(self):
self.fake_base_url = "http://localhost:8000"
self.mk_main()
self.record_keeper = importers.RecordKeeper()
self.logger = importers.Logger()
self.importer = importers.ContentImporter(
self.site.pk, self.fake_base_url,
record_keeper=self.record_keeper,
logger=self.logger)
def test_attach_page(self):
content = utils.fake_section_page_response(image=None)
content_copy = dict(content)
result = self.importer.attach_page(
self.section_index,
content_copy)
self.assertTrue(isinstance(result, SectionPage))
# an empty logger means no errors were created
# during page creation
self.assertEqual(self.logger.record, [])
self.assertEqual(
self.section_index.get_children()[0].title,
content["title"])
def test_attach_translated_content(self):
# create 2 languages
self.english = SiteLanguageRelation.objects.create(
language_setting=self.importer.language_setting,
locale='en',
is_active=True)
self.french = SiteLanguageRelation.objects.create(
language_setting=self.importer.language_setting,
locale='fr',
is_active=True)
content = constants.ARTICLE_PAGE_RESPONSE_MAIN_LANG
content_for_translated = constants.ARTICLE_PAGE_RESPONSE_FRENCH
content_copy = dict(content)
content_for_translated_copy = dict(content_for_translated)
index = self.section_index
parent = self.mk_section(index)
self.assertEqual(ArticlePage.objects.count(), 0)
article = self.importer.attach_page(parent, content_copy)
self.assertEqual(ArticlePage.objects.count(), 1)
translated_article = self.importer.attach_translated_content(
article, content_for_translated_copy, "fr")
self.assertEqual(ArticlePage.objects.count(), 2)
site = Site.objects.get(pk=self.importer.site_pk)
self.assertEqual(get_translation_for(
[article],
"fr", site)[0],
translated_article)
def test_create_section_page_translated(self):
# create 2 languages
self.english = SiteLanguageRelation.objects.create(
language_setting=self.importer.language_setting,
locale='en',
is_active=True)
self.french = SiteLanguageRelation.objects.create(
language_setting=self.importer.language_setting,
locale='fr',
is_active=True)
content = constants.SECTION_PAGE_RESPONSE
content_for_translated = constants.SECTION_PAGE_RESPONSE_FRENCH
content_copy = dict(content)
content_for_translated_copy = dict(content_for_translated)
parent = self.section_index
self.assertEqual(SectionPage.objects.count(), 0)
section = self.importer.attach_page(parent, content_copy)
self.assertEqual(SectionPage.objects.count(), 1)
translated_section = self.importer.attach_translated_content(
section, content_for_translated_copy, "fr")
self.assertEqual(SectionPage.objects.count(), 2)
site = Site.objects.get(pk=self.importer.site_pk)
self.assertEqual(get_translation_for(
[section],
"fr", site)[0],
translated_section)
@patch("molo.core.api.importers.requests.get",
side_effect=utils.mocked_requests_get)
def test_recursive_copy(self, mock_get):
'''
This test will copy content with the following structure:
|--Sections
|--Section 1 [with French translation]
| |--Article 1 [with French translation]
| |--Article 2
|
|--Section 2
|--Sub Section
|--Article 3
'''
# create the extra local language
self.french = SiteLanguageRelation.objects.create(
language_setting=self.importer.language_setting,
locale='fr',
is_active=True)
self.assertEqual(SectionPage.objects.count(), 0)
self.assertEqual(ArticlePage.objects.count(), 0)
self.importer.copy_children(
foreign_id=constants.SECTION_INDEX_PAGE_RESPONSE["id"],
existing_node=self.section_index)
self.assertEqual(SectionPage.objects.count(), 4)
self.assertEqual(ArticlePage.objects.count(), 4)
sec_1 = SectionPage.objects.get(
title=constants.SECTION_RESPONSE_1["title"])
self.assertTrue(sec_1)
self.assertEqual(sec_1.get_parent().specific, self.section_index)
self.assertEqual(sec_1.get_children().count(), 3)
sec_1_trans = SectionPage.objects.get(
title=constants.SECTION_RESPONSE_1_TRANSLATION_1["title"])
self.assertTrue(sec_1_trans)
self.assertEqual(sec_1_trans.get_parent().specific, self.section_index)
self.assertEqual(sec_1_trans.get_children().count(), 0)
sec_2 = SectionPage.objects.get(
title=constants.SECTION_RESPONSE_2["title"])
self.assertTrue(sec_2)
self.assertEqual(sec_2.get_parent().specific, self.section_index)
self.assertEqual(sec_2.get_children().count(), 1)
sec_3 = SectionPage.objects.get(
title=constants.SUB_SECTION_RESPONSE_1["title"])
self.assertTrue(sec_3)
self.assertEqual(sec_3.get_parent().specific, sec_2)
self.assertEqual(sec_3.get_children().count(), 1)
art_1 = ArticlePage.objects.get(
title=constants.ARTICLE_RESPONSE_1["title"])
self.assertTrue(art_1)
        self.assertEqual(art_1.get_parent().specific, sec_1)
art_1_trans = ArticlePage.objects.get(
title=constants.ARTICLE_RESPONSE_1_TRANSLATION["title"])
self.assertTrue(art_1_trans)
        self.assertEqual(art_1_trans.get_parent().specific, sec_1)
art_2 = ArticlePage.objects.get(
title=constants.ARTICLE_RESPONSE_2["title"])
self.assertTrue(art_2)
        self.assertEqual(art_2.get_parent().specific, sec_1)
art_3 = ArticlePage.objects.get(
title=constants.NESTED_ARTICLE_RESPONSE["title"])
self.assertTrue(art_3)
        self.assertEqual(art_3.get_parent().specific, sec_3)
def test_create_recommended_articles(self):
section = self.mk_section(
self.section_index, title="Parent Test Section 1",
)
# create 2 articles
self.mk_articles(section)
article_main = ArticlePage.objects.first()
article_rec = ArticlePage.objects.last()
# update map_id
# attach imaginary foreign IDs to articles, to fake import data
self.record_keeper.foreign_local_map["page_map"] = {
111: article_main.id, 222: article_rec.id}
        # refer copied page to foreign id of recommended article
self.record_keeper.foreign_to_many_foreign_map["recommended_articles"][111] = [222] # noqa
self.assertEqual(ArticlePageRecommendedSections.objects.count(), 0)
# recreate importer with updated record_keeper
self.importer = importers.ContentImporter(
self.site.pk, self.fake_base_url,
record_keeper=self.record_keeper)
self.importer.create_recommended_articles()
self.assertEqual(ArticlePageRecommendedSections.objects.count(), 1)
        recommendation = ArticlePageRecommendedSections.objects.first()
        self.assertEqual(recommendation.page.specific, article_main)
        self.assertEqual(recommendation.recommended_article.specific,
article_rec)
def test_create_related_sections(self):
section_main = self.mk_section(
self.section_index, title="Parent Test Section 1",
)
section_rel = self.mk_section(
            self.section_index, title="Parent Test Section 2",
)
# create articles
[parent_section, related_section] = self.mk_sections(section_main)
article = self.mk_article(parent_section)
# update map_id
# attach imaginary foreign IDs to sections, to fake import data
self.record_keeper.foreign_local_map["page_map"] = {
111: section_main.id,
222: section_rel.id,
333: article.id}
# refer copied page to foreign id of related section
self.record_keeper.foreign_to_many_foreign_map["related_sections"][333] = [222] # noqa
self.assertEqual(ArticlePageRelatedSections.objects.count(), 0)
self.importer.create_related_sections()
self.assertEqual(ArticlePageRelatedSections.objects.count(), 1)
relation = ArticlePageRelatedSections.objects.first()
self.assertEqual(relation.page.specific, article)
self.assertEqual(relation.section.specific,
section_rel)
def test_create_nav_tag_relationships(self):
'''
Check creation of ArticlePageTags, which are called nav_tags in model
'''
section = self.mk_section(
self.section_index, title="Parent Test Section 1",
)
article = self.mk_article(section)
# create tag
[tag_1, tag_2] = self.mk_tags(self.tag_index, count=2)
# update map_id
# attach imaginary foreign IDs to sections, to fake import data
self.record_keeper.foreign_local_map["page_map"] = {
111: tag_1.id, 222: tag_2.id, 333: article.id}
self.record_keeper.foreign_to_many_foreign_map["nav_tags"][333] = [111, 222] # noqa
self.assertEqual(ArticlePageTags.objects.count(), 0)
self.importer.create_nav_tag_relationships()
self.assertEqual(ArticlePageTags.objects.count(), 2)
[relation_1, relation_2] = list(ArticlePageTags.objects.all())
self.assertEqual(relation_1.page.specific, article)
self.assertEqual(relation_1.tag.specific,
tag_1)
self.assertEqual(relation_2.page.specific, article)
self.assertEqual(relation_2.tag.specific,
tag_2)
def test_create_section_tag_relationships(self):
section = self.mk_section(self.section_index)
# create tag
[tag_1, tag_2] = self.mk_tags(self.tag_index, count=2)
# update map_id
# attach imaginary foreign IDs to sections, to fake import data
self.record_keeper.foreign_local_map["page_map"] = {
111: tag_1.id, 222: tag_2.id, 333: section.id}
self.record_keeper.foreign_to_many_foreign_map["section_tags"][333] = [111, 222] # noqa
self.assertEqual(SectionPageTags.objects.count(), 0)
self.importer.create_section_tag_relationship()
self.assertEqual(SectionPageTags.objects.count(), 2)
[relation_1, relation_2] = list(SectionPageTags.objects.all())
self.assertEqual(relation_1.page.specific, section)
self.assertEqual(relation_1.tag.specific,
tag_1)
self.assertEqual(relation_2.page.specific, section)
self.assertEqual(relation_2.tag.specific,
tag_2)
def test_create_banner_page_links(self):
banner = self.mk_banner(parent=self.banner_index)
section = self.mk_section(parent=self.section_index)
# fake the banner page data
self.record_keeper.foreign_local_map["page_map"] = {
111: section.id, 222: banner.id}
self.record_keeper.foreign_to_foreign_map["banner_link_page"][222] = 111 # noqa
self.assertFalse(banner.banner_link_page)
self.importer.create_banner_page_links()
banner = BannerPage.objects.get(id=banner.id)
self.assertTrue(banner.banner_link_page)
self.assertEqual(banner.banner_link_page.specific, section)
def test_recreate_article_body(self):
# assumptions
# image reference in body has been imported
# page reference in body has been imported
# article created without body
# record_keeper has this ^ info
section = self.mk_section(self.section_index)
article = self.mk_article(section)
article.body = None
article.save()
article_id = article.id
self.assertFalse(article.body)
body = constants.ARTICLE_PAGE_RESPONSE_STREAM_FIELDS["body"]
self.record_keeper.foreign_local_map["page_map"] = {
48: section.id, # page linked to in article body
999: article.id # article page with body
}
foreign_image_id = 297
image = Image.objects.create(
title="fake_image",
file=get_test_image_file(),
)
self.record_keeper.foreign_local_map["image_map"] = {
foreign_image_id: image.id}
self.record_keeper.article_bodies = {999: body}
self.importer.recreate_article_body()
updated_article = ArticlePage.objects.get(id=article_id)
self.assertTrue(updated_article)
self.assertTrue(updated_article.body)
# TODO: check each field individually
class TestRecordKeeper(TestCase):
def setUp(self):
self.record_keeper = importers.RecordKeeper()
def test_record_keeper_record_local_id(self):
self.record_keeper.record_page_relation(1, 2)
self.assertEqual(
self.record_keeper.foreign_local_map["page_map"][1], 2)
def test_record_keeper_record_local_id_exception(self):
self.record_keeper.foreign_local_map["page_map"][1] = 2
with pytest.raises(RecordOverwriteError) as exception_info:
self.record_keeper.record_page_relation(1, 6)
self.assertEqual(
exception_info.value.__str__(),
"RecordOverwriteError")
def test_record_keeper_get_local_id(self):
self.record_keeper.record_page_relation(1, 2)
self.assertEqual(
self.record_keeper.get_local_page(1), 2)
def test_record_keeper_get_local_id_exception(self):
fake_id = 1
with pytest.raises(ReferenceUnimportedContent) as exception_info:
self.record_keeper.get_local_page(fake_id)
self.assertEqual(
exception_info.value.__str__(),
"Unimported content foreign ID: {}".format(fake_id))
def test_record_nav_tags_with_none(self):
'''
        Wagtail allows pages to be created without a link page.
        Test that this case does not cause an error.
'''
nested_fields = constants.NESTED_FIELD_NAV_TAG_WITH_NONE
fake_page_id = 99
self.record_keeper.record_nav_tags(nested_fields, fake_page_id)
self.assertTrue(True)
def test_record_keeper_throws_content_error(self):
fake_page_id = 9999
nested_fields = {
"recommended_articles": [{
"id": 1,
"meta": {
"type": "core.ArticlePageRecommendedSections"
},
"recommended_article": {
"i_d": 27,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/27/"
},
"title": "Article that is nested"
}
}],
}
with pytest.raises(ImportedContentInvalid) as exception_info:
self.record_keeper.record_recommended_articles(
nested_fields, fake_page_id)
self.assertEqual(
exception_info.value.__str__(),
("key of 'id' does not exist in related_item of"
" type: recommended_article")
)
def test_record_keeper_throws_content_error2(self):
fake_page_id = 9999
nested_fields = {
"recommended_articles": [{
"id": 1,
"meta": {
"type": "core.ArticlePageRecommendedSections"
},
"REKKOMMENDED_ARTICLE": { # corrupted field
"id": 27,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/27/"
},
"title": "Article that is nested"
}
}],
}
with pytest.raises(ImportedContentInvalid) as exception_info:
self.record_keeper.record_recommended_articles(nested_fields,
fake_page_id)
self.assertEqual(
exception_info.value.__str__(),
("key of 'recommended_article' does not exist in "
"nested_field of type: recommended_articles")
)
class TestLogger(TestCase):
def setUp(self):
self.logger = importers.Logger()
self.action_log_args = {
"log_type": importers.ACTION,
"message": "action_log_args",
"context": {
"foo": "bar",
"baz": "bonk",
},
}
self.error_log_args = {
"log_type": importers.ERROR,
"message": "error_log_args",
"context": {
"noo": "boo",
"can": "haz",
},
}
self.warning_log_args = {
"log_type": importers.WARNING,
"message": "warning_log_args",
"context": {
"blu": "red",
"grn": "ylw",
},
}
def test_log(self):
self.logger.log(**self.action_log_args)
self.assertEqual(self.logger.record[0], self.action_log_args)
self.logger.log(**self.error_log_args)
self.assertEqual(self.logger.record[0], self.action_log_args)
self.assertEqual(self.logger.record[1], self.error_log_args)
def test_get_email_logs(self):
self.logger.record.append(self.error_log_args)
result = self.logger.get_email_logs()
# Note that content, not formatting, is being tested
self.assertTrue(importers.ERROR in result)
self.assertTrue(self.error_log_args["message"] in result)
self.assertTrue(self.error_log_args["context"]["noo"] in result)
self.logger.record.append(self.warning_log_args)
result = self.logger.get_email_logs()
self.assertTrue(importers.WARNING in result)
self.assertTrue(self.warning_log_args["message"] in result)
self.assertTrue(self.warning_log_args["context"]["blu"] in result)
def test_get_email_logs_only_errors_warnings(self):
self.logger.record.append(self.action_log_args)
result = self.logger.get_email_logs()
self.assertEqual(result, "")
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import re
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest import clients
from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import timeutils
from tempest import test
CONF = config.CONF
class BaseTrustsV3Test(base.BaseIdentityV3AdminTest):
def setUp(self):
super(BaseTrustsV3Test, self).setUp()
# Use alt_username as the trustee
if not CONF.identity_feature_enabled.trust:
raise self.skipException("Trusts aren't enabled")
self.trustee_username = CONF.identity.alt_username
self.trust_id = None
def tearDown(self):
if self.trust_id:
# Do the delete in tearDown not addCleanup - we want the test to
# fail in the event there is a bug which causes undeletable trusts
self.delete_trust()
super(BaseTrustsV3Test, self).tearDown()
def create_trustor_and_roles(self):
# Get trustor project ID, use the admin project
self.trustor_project_name = self.client.tenant_name
self.trustor_project_id = self.get_tenant_by_name(
self.trustor_project_name)['id']
self.assertIsNotNone(self.trustor_project_id)
# Create a trustor User
self.trustor_username = data_utils.rand_name('user-')
u_desc = self.trustor_username + 'description'
u_email = self.trustor_username + '@testmail.xx'
self.trustor_password = data_utils.rand_name('pass-')
user = self.client.create_user(
self.trustor_username,
description=u_desc,
password=self.trustor_password,
email=u_email,
project_id=self.trustor_project_id)
self.trustor_user_id = user['id']
# And two roles, one we'll delegate and one we won't
self.delegated_role = data_utils.rand_name('DelegatedRole-')
self.not_delegated_role = data_utils.rand_name('NotDelegatedRole-')
role = self.client.create_role(self.delegated_role)
self.delegated_role_id = role['id']
role = self.client.create_role(self.not_delegated_role)
self.not_delegated_role_id = role['id']
# Assign roles to trustor
self.client.assign_user_role(self.trustor_project_id,
self.trustor_user_id,
self.delegated_role_id)
self.client.assign_user_role(self.trustor_project_id,
self.trustor_user_id,
self.not_delegated_role_id)
# Get trustee user ID, use the demo user
trustee_username = self.non_admin_client.user
self.trustee_user_id = self.get_user_by_name(trustee_username)['id']
self.assertIsNotNone(self.trustee_user_id)
# Initialize a new client with the trustor credentials
creds = cred_provider.get_credentials(
username=self.trustor_username,
password=self.trustor_password,
tenant_name=self.trustor_project_name)
os = clients.Manager(credentials=creds)
self.trustor_client = os.identity_v3_client
def cleanup_user_and_roles(self):
if self.trustor_user_id:
self.client.delete_user(self.trustor_user_id)
if self.delegated_role_id:
self.client.delete_role(self.delegated_role_id)
if self.not_delegated_role_id:
self.client.delete_role(self.not_delegated_role_id)
def create_trust(self, impersonate=True, expires=None):
trust_create = self.trustor_client.create_trust(
trustor_user_id=self.trustor_user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.trustor_project_id,
role_names=[self.delegated_role],
impersonation=impersonate,
expires_at=expires)
self.trust_id = trust_create['id']
return trust_create
def validate_trust(self, trust, impersonate=True, expires=None,
summary=False):
self.assertIsNotNone(trust['id'])
self.assertEqual(impersonate, trust['impersonation'])
# FIXME(shardy): ref bug #1246383 we can't check the
# microsecond component of the expiry time, because mysql
# <5.6.4 doesn't support microseconds.
# expected format 2013-12-20T16:08:36.036987Z
if expires is not None:
expires_nousec = re.sub(r'\.([0-9]){6}Z', '', expires)
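            # e.g. '2013-12-20T16:08:36.036987Z' -> '2013-12-20T16:08:36'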
self.assertTrue(trust['expires_at'].startswith(expires_nousec))
else:
self.assertIsNone(trust['expires_at'])
self.assertEqual(self.trustor_user_id, trust['trustor_user_id'])
self.assertEqual(self.trustee_user_id, trust['trustee_user_id'])
self.assertIn('v3/OS-TRUST/trusts', trust['links']['self'])
self.assertEqual(self.trustor_project_id, trust['project_id'])
if not summary:
self.assertEqual(self.delegated_role, trust['roles'][0]['name'])
self.assertEqual(1, len(trust['roles']))
def get_trust(self):
trust_get = self.trustor_client.get_trust(self.trust_id)
return trust_get
def validate_role(self, role):
self.assertEqual(self.delegated_role_id, role['id'])
self.assertEqual(self.delegated_role, role['name'])
self.assertIn('v3/roles/%s' % self.delegated_role_id,
role['links']['self'])
self.assertNotEqual(self.not_delegated_role_id, role['id'])
self.assertNotEqual(self.not_delegated_role, role['name'])
self.assertNotIn('v3/roles/%s' % self.not_delegated_role_id,
role['links']['self'])
def check_trust_roles(self):
# Check we find the delegated role
roles_get = self.trustor_client.get_trust_roles(
self.trust_id)
self.assertEqual(1, len(roles_get))
self.validate_role(roles_get[0])
role_get = self.trustor_client.get_trust_role(
self.trust_id, self.delegated_role_id)
self.validate_role(role_get)
role_get = self.trustor_client.check_trust_role(
self.trust_id, self.delegated_role_id)
# And that we don't find not_delegated_role
self.assertRaises(lib_exc.NotFound,
self.trustor_client.get_trust_role,
self.trust_id,
self.not_delegated_role_id)
self.assertRaises(lib_exc.NotFound,
self.trustor_client.check_trust_role,
self.trust_id,
self.not_delegated_role_id)
def delete_trust(self):
self.trustor_client.delete_trust(self.trust_id)
self.assertRaises(lib_exc.NotFound,
self.trustor_client.get_trust,
self.trust_id)
self.trust_id = None
class TrustsV3TestJSON(BaseTrustsV3Test):
def setUp(self):
super(TrustsV3TestJSON, self).setUp()
self.create_trustor_and_roles()
self.addCleanup(self.cleanup_user_and_roles)
@test.attr(type='smoke')
def test_trust_impersonate(self):
# Test case to check we can create, get and delete a trust
# updates are not supported for trusts
trust = self.create_trust()
self.validate_trust(trust)
trust_get = self.get_trust()
self.validate_trust(trust_get)
self.check_trust_roles()
@test.attr(type='smoke')
def test_trust_noimpersonate(self):
# Test case to check we can create, get and delete a trust
# with impersonation=False
trust = self.create_trust(impersonate=False)
self.validate_trust(trust, impersonate=False)
trust_get = self.get_trust()
self.validate_trust(trust_get, impersonate=False)
self.check_trust_roles()
@test.attr(type='smoke')
def test_trust_expire(self):
# Test case to check we can create, get and delete a trust
# with an expiry specified
expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)
expires_str = timeutils.isotime(at=expires_at, subsecond=True)
trust = self.create_trust(expires=expires_str)
self.validate_trust(trust, expires=expires_str)
trust_get = self.get_trust()
self.validate_trust(trust_get, expires=expires_str)
self.check_trust_roles()
@test.attr(type='smoke')
def test_trust_expire_invalid(self):
        # Test case to check that an invalid expiry time
        # is rejected with the correct error
expires_str = 'bad.123Z'
self.assertRaises(lib_exc.BadRequest,
self.create_trust,
expires=expires_str)
@test.attr(type='smoke')
def test_get_trusts_query(self):
self.create_trust()
trusts_get = self.trustor_client.get_trusts(
trustor_user_id=self.trustor_user_id)
self.assertEqual(1, len(trusts_get))
self.validate_trust(trusts_get[0], summary=True)
@test.attr(type='smoke')
def test_get_trusts_all(self):
self.create_trust()
trusts_get = self.client.get_trusts()
trusts = [t for t in trusts_get
if t['id'] == self.trust_id]
self.assertEqual(1, len(trusts))
self.validate_trust(trusts[0], summary=True)
|
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
X Y Item
"""
import math
import numpy as np
from matplotlib import lines
from matplotlib import patches
class Xyitem(object):
"""XY item to plot"""
def __init__(self, ax, windowsize, vehiclelength, title, xlabel, ylabel):
self.ax = ax
self.windowsize = windowsize
self.vehiclelength = vehiclelength
self.ax.set_title(title)
self.ax.set_xlabel(xlabel, fontsize=10)
self.ax.set_ylabel(ylabel, fontsize=10)
self.lines = []
self.pathstartx = []
self.pathstarty = []
self.carxhist = []
self.caryhist = []
self.targetx = []
self.targety = []
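        # indices into self.ax.lines for the artists updated in place;
        # -1 means the corresponding artist has not been created yet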
self.pathstartidx = -1
self.carxyhistidx = -1
self.carposidx = -1
self.targethistidx = -1
self.axx = float('inf')
self.axy = float('inf')
self.planningavailable = False
def reset(self):
"""Reset"""
del self.pathstartx[:]
del self.pathstarty[:]
del self.carxhist[:]
del self.caryhist[:]
del self.targetx[:]
del self.targety[:]
self.ax.cla()
self.pathstartidx = -1
self.carxyhistidx = -1
self.carposidx = -1
self.targethistidx = -1
self.axx = float('inf')
self.axy = float('inf')
self.planningavailable = False
def new_planning(self, time, x, y):
"""new planning"""
self.planningtime = time
self.planningx = x
self.planningy = y
self.pathstartx.append(x[0])
self.pathstarty.append(y[0])
if self.pathstartidx == -1:
self.ax.plot(
self.pathstartx,
self.pathstarty,
color='red',
marker='*',
ls='None')
self.pathstartidx = len(self.ax.lines) - 1
self.current_line = lines.Line2D(x, y, color='red', lw=1.5)
self.ax.add_line(self.current_line)
else:
self.ax.lines[self.pathstartidx].set_data(self.pathstartx,
self.pathstarty)
self.current_line.set_data(x, y)
self.planningavailable = True
def new_carstatus(self, time, x, y, heading, steer_angle, autodriving):
"""new carstatus"""
self.carxhist.append(x)
self.caryhist.append(y)
angle = math.degrees(heading) - 90
carcolor = 'red' if autodriving else 'blue'
if self.carxyhistidx == -1:
self.ax.plot(self.carxhist, self.caryhist, color="blue")
self.carxyhistidx = len(self.ax.lines) - 1
self.ax.plot(
self.carxhist,
self.caryhist,
marker=(3, 0, angle),
markersize=20,
mfc=carcolor)
self.carposidx = len(self.ax.lines) - 1
else:
self.ax.lines[self.carxyhistidx].set_data(self.carxhist,
self.caryhist)
self.ax.lines[self.carposidx].set_data(x, y)
self.ax.lines[self.carposidx].set_marker((3, 0, angle))
self.ax.lines[self.carposidx].set_mfc(carcolor)
self.ax.patches[0].remove()
if self.planningavailable:
xtarget = np.interp(time, self.planningtime, self.planningx)
self.targetx.append(xtarget)
ytarget = np.interp(time, self.planningtime, self.planningy)
self.targety.append(ytarget)
if self.targethistidx == -1:
self.ax.plot(self.targetx, self.targety, color="green", lw=1.5)
self.targethistidx = len(self.ax.lines) - 1
else:
self.ax.lines[self.targethistidx].set_data(
self.targetx, self.targety)
self.ax.add_patch(self.gen_steer_curve(x, y, heading, steer_angle))
# Update Window X, Y Axis Limits
xcenter = x + math.cos(heading) * 40
ycenter = y + math.sin(heading) * 40
if xcenter >= (self.axx + 20) or xcenter <= (self.axx - 20) or \
ycenter >= (self.axy + 20) or ycenter <= (self.axy - 20):
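            # estimate the axes' pixel aspect ratio (height / width) so the
            # y-limits keep the same on-screen proportions as the x-limits;
            # note this reaches into private matplotlib transform internals
            # and may not work on every matplotlib version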
scale = self.ax.get_window_extent(
)._transform._boxout._bbox.get_points()[1]
original = self.ax.get_position().get_points()
finalscale = (original[1] - original[0]) * scale
ratio = finalscale[1] / finalscale[0]
self.axx = xcenter
self.axy = ycenter
self.ax.set_xlim(
[xcenter - self.windowsize, xcenter + self.windowsize])
self.ax.set_ylim([
ycenter - self.windowsize * ratio,
ycenter + self.windowsize * ratio
])
def gen_steer_curve(self, x, y, heading, steer_angle):
"""Generate Steering Curve to predict car trajectory"""
if abs(math.tan(math.radians(steer_angle))) > 0.0001:
R = self.vehiclelength / math.tan(math.radians(steer_angle))
else:
R = 100000
radius = abs(R)
lengthangle = 7200 / (2 * math.pi * radius)
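        # arc angle in degrees for a fixed arc length of 20 m:
        # 7200 / (2 * pi * R) == (20 / R) * (180 / pi)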
if R >= 0:
centerangle = math.pi / 2 + heading
startangle = math.degrees(heading - math.pi / 2)
theta1 = 0
theta2 = lengthangle
else:
centerangle = heading - math.pi / 2
startangle = math.degrees(math.pi / 2 + heading)
theta1 = -lengthangle
theta2 = 0
centerx = x + math.cos(centerangle) * radius
centery = y + math.sin(centerangle) * radius
curve = patches.Arc(
(centerx, centery),
2 * radius,
2 * radius,
angle=startangle,
theta1=theta1,
theta2=theta2,
linewidth=2,
zorder=2)
return curve
def draw_lines(self):
"""plot lines"""
for polygon in self.ax.patches:
self.ax.draw_artist(polygon)
for line in self.ax.lines:
self.ax.draw_artist(line)
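# A minimal, hypothetical usage sketch for Xyitem; the names and values below
# are illustrative only:
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   item = Xyitem(ax, windowsize=80, vehiclelength=2.8,
#                 title="Trajectory", xlabel="x (m)", ylabel="y (m)")
#   item.new_planning(time=[0.0, 1.0], x=[0.0, 5.0], y=[0.0, 0.5])
#   item.new_carstatus(0.5, x=2.5, y=0.25, heading=0.1,
#                      steer_angle=3.0, autodriving=True)
#   item.draw_lines()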
|
|
import mimeparse
from django.contrib.auth.models import AnonymousUser
from hyperadmin.states import State
class APIRequest(object):
"""
An API Request
"""
def __init__(self, site, path, url_args, url_kwargs):
self.site = site
self.path = path
self.url_args = url_args
self.url_kwargs = url_kwargs
#self.payload = payload
#self.method = method
#self.user = user
#self.params = params
#self.META = meta
self.session_state = State()
self.endpoint_state = State()
self.endpoint_state['endpoints'] = dict()
self.endpoint_state['link_prototypes'] = dict()
super(APIRequest, self).__init__()
def get_django_request(self):
raise NotImplementedError
@property
def META(self):
return self.session_state['meta']
@property
def media_types(self):
return self.get_site().media_types
def get_response_type(self):
"""
Returns the active response type to be used
:rtype: string
"""
val = self.META.get('HTTP_ACCEPT', self.META.get('CONTENT_TYPE', ''))
media_types = self.media_types.keys()
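        # mimeparse.best_match() returns the entry from media_types that best
        # satisfies the header, e.g. best_match(['application/json',
        # 'text/html'], 'text/html,application/json;q=0.9') == 'text/html'
        # (values illustrative only)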
if not media_types:
return val
return mimeparse.best_match(media_types, val) or val
def get_request_type(self):
"""
Returns the active request type to be used
:rtype: string
"""
val = self.META.get('CONTENT_TYPE', self.META.get('HTTP_ACCEPT', ''))
media_types = self.media_types.keys()
if not media_types:
return val
return mimeparse.best_match(media_types, val) or val
def get_request_media_type(self):
"""
Returns the request media type to be used or raises an error
:raises ValueError: when the requested content type is unrecognized
:rtype: string
"""
content_type = self.get_request_type()
media_type_cls = self.media_types.get(content_type, None)
if media_type_cls is None:
raise ValueError('Unrecognized request content type "%s". Choices are: %s' % (content_type, self.media_types.keys()))
return media_type_cls(self)
def get_response_media_type(self):
"""
Returns the response media type to be used or raises an error
:raises ValueError: when the requested content type is unrecognized
:rtype: string
"""
content_type = self.get_response_type()
media_type_cls = self.media_types.get(content_type, None)
if media_type_cls is None:
            raise ValueError('Unrecognized response content type "%s". Choices are: %s' % (content_type, self.media_types.keys()))
return media_type_cls(self)
def get_endpoint(self, urlname):
"""
Returns a bound endpoint matching the urlname
:param urlname: The urlname to find
:type urlname: string
:raises KeyError: when the urlname does not match any endpoints
:rtype: Endpoint
"""
if urlname not in self.endpoint_state['endpoints']:
endpoint = self.site.get_endpoint_from_urlname(urlname)
bound_endpoint = endpoint.fork(api_request=self)
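            # forking binds the endpoint to this api_request; the fork is
            # assumed to register itself via record_endpoint(), which fills
            # self.endpoint_state['endpoints'][urlname] used below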
if bound_endpoint != self.endpoint_state['endpoints'][urlname]:
pass
return self.endpoint_state['endpoints'][urlname]
def record_endpoint(self, endpoint):
"""
Record the endpoint in our urlname cache
:param resource: Endpoint
"""
assert endpoint.api_request == self
urlname = endpoint.get_url_name()
if urlname not in self.endpoint_state['endpoints']:
self.endpoint_state['endpoints'][urlname] = endpoint
#else:
# original = self.endpoint_state['endpoints'][urlname]
# self.site.get_logger().debug('Double registration at api request level on %s by %s, original: %s' % (urlname, endpoint, original))
def get_link_prototypes(self, endpoint):
"""
        Returns the link prototypes to be used by the endpoint
:param endpoint: endpoint object
:rtype: list of link prototypes
"""
urlname = endpoint.get_url_name()
if urlname not in self.endpoint_state['link_prototypes']:
link_prototypes = endpoint.create_link_prototypes()
self.endpoint_state['link_prototypes'][urlname] = link_prototypes
return self.endpoint_state['link_prototypes'][urlname]
def get_site(self):
"""
Returns the bound site
:rtype: SiteResource
"""
if 'site' not in self.endpoint_state:
bound_site = self.site.fork(api_request=self)
self.endpoint_state['site'] = bound_site
return self.endpoint_state['site']
def generate_response(self, link, state):
"""
Returns a response generated from the response media type
:param link: The active link representing the endpoint's response
:param state: The endpoint's state
:rtype: [Http]Response
"""
media_type = self.get_response_media_type()
response_type = self.get_response_type()
return media_type.serialize(content_type=response_type, link=link, state=state)
def generate_options_response(self, links, state):
"""
Returns an OPTIONS response generated from the response media type
:param links: dictionary mapping available HTTP methods to a link
:param state: The endpoint's state
:rtype: [Http]Response
"""
media_type = self.get_response_media_type()
response_type = self.get_response_type()
return media_type.options_serialize(content_type=response_type, links=links, state=state)
def reverse(self, name, *args, **kwargs):
return self.get_site().reverse(name, *args, **kwargs)
class InternalAPIRequest(APIRequest):
"""
An Internal API Request
"""
def __init__(self, site, path='/', url_args=[], url_kwargs={}, **kwargs):
super(InternalAPIRequest, self).__init__(site, path, url_args, url_kwargs)
kwargs.setdefault('method', 'GET')
kwargs.setdefault('params', {})
kwargs.setdefault('payload', {})
kwargs.setdefault('full_path', path)
for key, val in kwargs.iteritems():
setattr(self, key, val)
def get_full_path(self):
return self.full_path
def get_django_request(self):
return self.request
class HTTPAPIRequest(APIRequest):
"""
Represents an API Request spawned from a Django HTTP Request
"""
get_to_meta_map = {
'_HTTP_ACCEPT':'HTTP_ACCEPT',
'_CONTENT_TYPE':'CONTENT_TYPE',
}
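    # e.g. requesting '?_HTTP_ACCEPT=application/json' makes patched_meta()
    # below override the HTTP_ACCEPT value seen by get_response_type()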
def __init__(self, site, request, url_args, url_kwargs):
self.request = request
path = request.path
super(HTTPAPIRequest, self).__init__(site=site, path=path, url_args=url_args, url_kwargs=url_kwargs)
@property
def payload(self):
if not hasattr(self, '_payload'):
media_type = self.get_request_media_type()
self._payload = media_type.deserialize()
return self._payload
@property
def method(self):
return self.request.method
def get_django_request(self):
return self.request
def get_full_path(self):
return self.request.get_full_path()
@property
def user(self):
return self.session_state.get('auth', AnonymousUser())
@property
def params(self):
if not hasattr(self, '_params'):
self._params = self.request.GET.copy()
return self._params
def get_session_data_from_request(self, request):
#TODO consult site object
data = {'endpoints': {},
'resources': {},
'request': request,
'meta': self.patched_meta(request),
'extra_get_params': self.get_passthrough_params(request),}
if hasattr(request, 'user'):
data['auth'] = request.user
return data
def populate_session_data_from_request(self, request):
data = self.get_session_data_from_request(request)
self.session_state.update(data)
#TODO set response type & request type
def patched_meta(self, request):
meta = dict(request.META)
for src, dst in self.get_to_meta_map.iteritems():
if src in request.GET:
meta[dst] = request.GET[src]
return meta
def get_passthrough_params(self, request):
pass_through_params = dict()
for src, dst in self.get_to_meta_map.iteritems():
if src in request.GET:
pass_through_params[src] = request.GET[src]
return pass_through_params
class NamespaceAPIRequest(InternalAPIRequest):
def __init__(self, api_request, path='/', url_args=[], url_kwargs={}, **kwargs):
self.original_api_request = api_request
kwargs.setdefault('full_path', self.original_api_request.get_full_path())
super(NamespaceAPIRequest, self).__init__(api_request.site, path, url_args, url_kwargs, **kwargs)
self.site = api_request.site.fork(api_request=self)
self.session_state = State(substates=[api_request.session_state])
@property
def user(self):
return self.original_api_request.user
def get_django_request(self):
return self.original_api_request.get_django_request()
class Namespace(object):
"""
Represents alternative data associated with the current api request
Namespaced data is provided by another resource through an internal api request
"""
def __init__(self, name, endpoint, state_data={}):
self.name = name
self.api_request = NamespaceAPIRequest(endpoint.api_request)
self.state_data = state_data
self.endpoint = endpoint.fork(api_request=self.api_request)
self.endpoint.state.update(state_data)
self.api_request.endpoint_state['endpoints'][self.endpoint.get_url_name()] = self.endpoint
def get_namespaces(self):
return dict()
def get_prompt(self):
return self.endpoint.get_prompt()
@property
def link(self):
if not hasattr(self, '_link'):
self._link = self.endpoint.get_link()
return self._link
@property
def state(self):
return self.endpoint.state
|
|
import os
import logging
from django.conf import settings
from django.contrib.gis.db import models
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.template.defaultfilters import slugify
from django.utils.translation import get_language, ugettext_lazy as _
import simplekml
from mapentity.models import MapEntityMixin
from mapentity.serializers import plain_text
from geotrek.authent.models import StructureRelated
from geotrek.core.models import Path, Topology
from geotrek.common.utils import intersecting, classproperty
from geotrek.common.mixins import (PicturesMixin, PublishableMixin,
PictogramMixin, OptionalPictogramMixin)
from geotrek.common.models import Theme
from geotrek.maintenance.models import Intervention, Project
from geotrek.tourism import models as tourism_models
from .templatetags import trekking_tags
logger = logging.getLogger(__name__)
class Trek(StructureRelated, PicturesMixin, PublishableMixin, MapEntityMixin, Topology):
topo_object = models.OneToOneField(Topology, parent_link=True,
db_column='evenement')
departure = models.CharField(verbose_name=_(u"Departure"), max_length=128, blank=True,
help_text=_(u"Departure description"), db_column='depart')
arrival = models.CharField(verbose_name=_(u"Arrival"), max_length=128, blank=True,
help_text=_(u"Arrival description"), db_column='arrivee')
description_teaser = models.TextField(verbose_name=_(u"Description teaser"), blank=True,
help_text=_(u"A brief summary (map pop-ups)"), db_column='chapeau')
description = models.TextField(verbose_name=_(u"Description"), blank=True, db_column='description',
help_text=_(u"Complete description"))
ambiance = models.TextField(verbose_name=_(u"Ambiance"), blank=True, db_column='ambiance',
help_text=_(u"Main attraction and interest"))
access = models.TextField(verbose_name=_(u"Access"), blank=True, db_column='acces',
help_text=_(u"Best way to go"))
disabled_infrastructure = models.TextField(verbose_name=_(u"Disabled infrastructure"), db_column='handicap',
blank=True, help_text=_(u"Any specific infrastructure"))
duration = models.FloatField(verbose_name=_(u"Duration"), default=0, blank=True, db_column='duree',
help_text=_(u"In decimal hours (ex. 1.5 for 1 h 30)"),
validators=[MinValueValidator(0)])
is_park_centered = models.BooleanField(verbose_name=_(u"Is in the midst of the park"), db_column='coeur',
help_text=_(u"Crosses center of park"))
advised_parking = models.CharField(verbose_name=_(u"Advised parking"), max_length=128, blank=True, db_column='parking',
help_text=_(u"Where to park"))
parking_location = models.PointField(verbose_name=_(u"Parking location"), db_column='geom_parking',
srid=settings.SRID, spatial_index=False, blank=True, null=True)
public_transport = models.TextField(verbose_name=_(u"Public transport"), blank=True, db_column='transport',
help_text=_(u"Train, bus (see web links)"))
advice = models.TextField(verbose_name=_(u"Advice"), blank=True, db_column='recommandation',
help_text=_(u"Risks, danger, best period, ..."))
themes = models.ManyToManyField(Theme, related_name="treks",
db_table="o_r_itineraire_theme", blank=True, null=True, verbose_name=_(u"Themes"),
help_text=_(u"Main theme(s)"))
networks = models.ManyToManyField('TrekNetwork', related_name="treks",
db_table="o_r_itineraire_reseau", blank=True, null=True, verbose_name=_(u"Networks"),
help_text=_(u"Hiking networks"))
practice = models.ForeignKey('Practice', related_name="treks",
blank=True, null=True, verbose_name=_(u"Practice"), db_column='pratique')
accessibilities = models.ManyToManyField('Accessibility', related_name="treks",
db_table="o_r_itineraire_accessibilite", blank=True, null=True,
verbose_name=_(u"Accessibilities"))
route = models.ForeignKey('Route', related_name='treks',
blank=True, null=True, verbose_name=_(u"Route"), db_column='parcours')
difficulty = models.ForeignKey('DifficultyLevel', related_name='treks',
blank=True, null=True, verbose_name=_(u"Difficulty"), db_column='difficulte')
web_links = models.ManyToManyField('WebLink', related_name="treks",
db_table="o_r_itineraire_web", blank=True, null=True, verbose_name=_(u"Web links"),
help_text=_(u"External resources"))
related_treks = models.ManyToManyField('self', through='TrekRelationship',
verbose_name=_(u"Related treks"), symmetrical=False,
help_text=_(u"Connections between treks"),
related_name='related_treks+') # Hide reverse attribute
parent = models.ForeignKey('self', verbose_name=_(u"Parent"), db_column='parent', blank=True, null=True,
related_name='children')
information_desks = models.ManyToManyField(tourism_models.InformationDesk, related_name='treks',
db_table="o_r_itineraire_renseignement", blank=True, null=True,
verbose_name=_(u"Information desks"),
help_text=_(u"Where to obtain information"))
points_reference = models.MultiPointField(verbose_name=_(u"Points of reference"), db_column='geom_points_reference',
srid=settings.SRID, spatial_index=False, blank=True, null=True)
source = models.ManyToManyField('common.RecordSource',
null=True, blank=True, related_name='treks',
verbose_name=_("Source"), db_table='o_r_itineraire_source')
eid = models.CharField(verbose_name=_(u"External id"), max_length=128, blank=True, db_column='id_externe')
eid2 = models.CharField(verbose_name=_(u"Second external id"), max_length=128, blank=True, db_column='id_externe2')
objects = Topology.get_manager_cls(models.GeoManager)()
category_id_prefix = 'T'
class Meta:
db_table = 'o_t_itineraire'
verbose_name = _(u"Trek")
verbose_name_plural = _(u"Treks")
ordering = ['name']
def __unicode__(self):
return self.name
@models.permalink
def get_document_public_url(self):
""" Override ``geotrek.common.mixins.PublishableMixin``
"""
return ('trekking:trek_document_public', [], {'lang': get_language(), 'pk': self.pk, 'slug': self.slug})
@property
def related(self):
return self.related_treks.exclude(deleted=True).exclude(pk=self.pk).distinct()
@classproperty
def related_verbose_name(cls):
return _("Related treks")
@property
def relationships(self):
# Does not matter if a or b
return TrekRelationship.objects.filter(trek_a=self)
@property
def published_relationships(self):
return self.relationships.filter(trek_b__published=True)
@property
def poi_types(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
# Can't use values_list and must add 'ordering' because of bug:
# https://code.djangoproject.com/ticket/14930
values = self.pois.values('ordering', 'type')
else:
values = self.pois.values('type')
pks = [value['type'] for value in values]
return POIType.objects.filter(pk__in=set(pks))
@property
def length_kilometer(self):
return "%.1f" % (self.length / 1000.0)
@property
def networks_display(self):
return ', '.join([unicode(n) for n in self.networks.all()])
@property
def districts_display(self):
return ', '.join([unicode(d) for d in self.districts])
@property
def themes_display(self):
return ', '.join([unicode(n) for n in self.themes.all()])
@property
def city_departure(self):
cities = self.cities
return unicode(cities[0]) if len(cities) > 0 else ''
def kml(self):
""" Exports trek into KML format, add geometry as linestring and POI
as place marks """
kml = simplekml.Kml()
# Main itinerary
geom3d = self.geom_3d.transform(4326, clone=True) # KML uses WGS84
line = kml.newlinestring(name=self.name,
description=plain_text(self.description),
coords=geom3d.coords)
line.style.linestyle.color = simplekml.Color.red # Red
line.style.linestyle.width = 4 # pixels
# Place marks
for poi in self.pois:
place = poi.geom_3d.transform(settings.API_SRID, clone=True)
kml.newpoint(name=poi.name,
description=plain_text(poi.description),
coords=[place.coords])
return kml._genkml()
def has_geom_valid(self):
"""A trek should be a LineString, even if it's a loop.
"""
return super(Trek, self).has_geom_valid() and self.geom.geom_type.lower() == 'linestring'
@property
def duration_pretty(self):
return trekking_tags.duration(self.duration)
@classproperty
def duration_pretty_verbose_name(cls):
return _("Formated duration")
@classmethod
def path_treks(cls, path):
treks = cls.objects.existing().filter(aggregations__path=path)
# The following part prevents conflict with default trek ordering
# ProgrammingError: SELECT DISTINCT ON expressions must match initial ORDER BY expressions
return treks.order_by('topo_object').distinct('topo_object')
@classmethod
def topology_treks(cls, topology):
if settings.TREKKING_TOPOLOGY_ENABLED:
qs = cls.overlapping(topology)
else:
area = topology.geom.buffer(settings.TREK_POI_INTERSECTION_MARGIN)
qs = cls.objects.existing().filter(geom__intersects=area)
return qs
@classmethod
def published_topology_treks(cls, topology):
return cls.topology_treks(topology).filter(published=True)
# Rando v1 compat
@property
def usages(self):
return [self.practice] if self.practice else []
@classmethod
def get_create_label(cls):
return _(u"Add a new trek")
@property
def children_id(self):
return list(self.children.order_by('name').values_list('id', flat=True))
@property
def previous_id(self):
if self.parent is None:
return None
children = self.parent.children_id
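        # note: index() - 1 is -1 for the first child, so Python's negative
        # indexing makes this wrap around to the last sibling instead of
        # raising IndexError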
try:
return children[children.index(self.id) - 1]
except IndexError:
return None
@property
def next_id(self):
if self.parent is None:
return None
children = self.parent.children_id
try:
return children[children.index(self.id) + 1]
except IndexError:
return None
def clean(self):
if self.parent and self.parent == self:
raise ValidationError(_(u"Cannot use itself as parent trek."))
if self.parent and self.parent.parent:
raise ValidationError(_(u"Cannot use a a child trek as parent trek."))
@property
def prefixed_category_id(self):
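        # e.g. with category_id_prefix 'T' and a practice with id 3 this
        # yields 'T3' when SPLIT_TREKS_CATEGORIES_BY_PRACTICE is enabled,
        # otherwise just 'T'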
if settings.SPLIT_TREKS_CATEGORIES_BY_PRACTICE and self.practice:
return '{prefix}{id}'.format(prefix=self.category_id_prefix, id=self.practice.id)
else:
return self.category_id_prefix
def distance(self, to_cls):
if self.practice and self.practice.distance is not None:
return self.practice.distance
else:
return settings.TOURISM_INTERSECTION_MARGIN
def is_public(self):
return self.any_published or (self.parent and self.parent.any_published)
Path.add_property('treks', Trek.path_treks, _(u"Treks"))
Topology.add_property('treks', Trek.topology_treks, _(u"Treks"))
if settings.HIDE_PUBLISHED_TREKS_IN_TOPOLOGIES:
Topology.add_property('published_treks', lambda self: [], _(u"Published treks"))
else:
Topology.add_property('published_treks', lambda self: intersecting(Trek, self).filter(published=True), _(u"Published treks"))
Intervention.add_property('treks', lambda self: self.topology.treks if self.topology else [], _(u"Treks"))
Project.add_property('treks', lambda self: self.edges_by_attr('treks'), _(u"Treks"))
tourism_models.TouristicContent.add_property('treks', lambda self: intersecting(Trek, self), _(u"Treks"))
tourism_models.TouristicContent.add_property('published_treks', lambda self: intersecting(Trek, self).filter(published=True), _(u"Published treks"))
tourism_models.TouristicEvent.add_property('treks', lambda self: intersecting(Trek, self), _(u"Treks"))
tourism_models.TouristicEvent.add_property('published_treks', lambda self: intersecting(Trek, self).filter(published=True), _(u"Published treks"))
class TrekRelationshipManager(models.Manager):
use_for_related_fields = True
def get_queryset(self):
# Select treks foreign keys by default
qs = super(TrekRelationshipManager, self).get_queryset().select_related('trek_a', 'trek_b')
# Exclude deleted treks
return qs.exclude(trek_a__deleted=True).exclude(trek_b__deleted=True)
class TrekRelationship(models.Model):
"""
    Relationships between treks: the symmetrical aspect is managed by a
    trigger that duplicates all pairs (trek_a, trek_b)
"""
has_common_departure = models.BooleanField(verbose_name=_(u"Common departure"), db_column='depart_commun', default=False)
has_common_edge = models.BooleanField(verbose_name=_(u"Common edge"), db_column='troncons_communs', default=False)
is_circuit_step = models.BooleanField(verbose_name=_(u"Circuit step"), db_column='etape_circuit', default=False)
trek_a = models.ForeignKey(Trek, related_name="trek_relationship_a", db_column='itineraire_a')
trek_b = models.ForeignKey(Trek, related_name="trek_relationship_b", db_column='itineraire_b', verbose_name=_(u"Trek"))
objects = TrekRelationshipManager()
class Meta:
db_table = 'o_r_itineraire_itineraire'
verbose_name = _(u"Trek relationship")
verbose_name_plural = _(u"Trek relationships")
unique_together = ('trek_a', 'trek_b')
def __unicode__(self):
return u"%s <--> %s" % (self.trek_a, self.trek_b)
@property
def relation(self):
return u"%s %s%s%s" % (
self.trek_b.name_display,
_("Departure") if self.has_common_departure else '',
_("Path") if self.has_common_edge else '',
_("Circuit") if self.is_circuit_step else ''
)
@property
def relation_display(self):
return self.relation
class TrekNetwork(PictogramMixin):
network = models.CharField(verbose_name=_(u"Name"), max_length=128, db_column='reseau')
class Meta:
db_table = 'o_b_reseau'
verbose_name = _(u"Trek network")
verbose_name_plural = _(u"Trek networks")
ordering = ['network']
def __unicode__(self):
return self.network
class Practice(PictogramMixin):
name = models.CharField(verbose_name=_(u"Name"), max_length=128, db_column='nom')
distance = models.IntegerField(verbose_name=_(u"Distance"), blank=True, null=True, db_column='distance',
help_text=_(u"Touristic contents and events will associate within this distance (meters)"))
cirkwi = models.ForeignKey('cirkwi.CirkwiLocomotion', verbose_name=_(u"Cirkwi locomotion"), null=True, blank=True)
class Meta:
db_table = 'o_b_pratique'
verbose_name = _(u"Practice")
verbose_name_plural = _(u"Practices")
ordering = ['name']
def __unicode__(self):
return self.name
@property
def slug(self):
return slugify(self.name) or str(self.pk)
class Accessibility(OptionalPictogramMixin):
name = models.CharField(verbose_name=_(u"Name"), max_length=128, db_column='nom')
cirkwi = models.ForeignKey('cirkwi.CirkwiTag', verbose_name=_(u"Cirkwi tag"), null=True, blank=True)
id_prefix = 'A'
class Meta:
db_table = 'o_b_accessibilite'
verbose_name = _(u"Accessibility")
verbose_name_plural = _(u"Accessibilities")
ordering = ['name']
def __unicode__(self):
return self.name
@property
def prefixed_id(self):
return '{prefix}{id}'.format(prefix=self.id_prefix, id=self.id)
@property
def slug(self):
return slugify(self.name) or str(self.pk)
class Route(OptionalPictogramMixin):
route = models.CharField(verbose_name=_(u"Name"), max_length=128, db_column='parcours')
class Meta:
db_table = 'o_b_parcours'
verbose_name = _(u"Route")
verbose_name_plural = _(u"Routes")
ordering = ['route']
def __unicode__(self):
return self.route
class DifficultyLevel(OptionalPictogramMixin):
"""We use an IntegerField for id, since we want to edit it in Admin.
    This column is used to order difficulty levels, especially on the public
    website where treks are filtered by difficulty ids.
"""
id = models.IntegerField(primary_key=True)
difficulty = models.CharField(verbose_name=_(u"Difficulty level"),
max_length=128, db_column='difficulte')
cirkwi_level = models.IntegerField(verbose_name=_(u"Cirkwi level"), blank=True, null=True,
db_column='niveau_cirkwi', help_text=_(u"Between 1 and 8"))
cirkwi = models.ForeignKey('cirkwi.CirkwiTag', verbose_name=_(u"Cirkwi tag"), null=True, blank=True)
class Meta:
db_table = 'o_b_difficulte'
verbose_name = _(u"Difficulty level")
verbose_name_plural = _(u"Difficulty levels")
ordering = ['id']
def __unicode__(self):
return self.difficulty
def save(self, *args, **kwargs):
"""Manually auto-increment ids"""
if not self.id:
try:
last = self.__class__.objects.all().order_by('-id')[0]
self.id = last.id + 1
except IndexError:
self.id = 1
super(DifficultyLevel, self).save(*args, **kwargs)
class WebLinkManager(models.Manager):
def get_queryset(self):
return super(WebLinkManager, self).get_queryset().select_related('category')
class WebLink(models.Model):
name = models.CharField(verbose_name=_(u"Name"), max_length=128, db_column='nom')
url = models.URLField(verbose_name=_(u"URL"), max_length=128, db_column='url')
category = models.ForeignKey('WebLinkCategory', verbose_name=_(u"Category"),
related_name='links', null=True, blank=True,
db_column='categorie')
objects = WebLinkManager()
class Meta:
db_table = 'o_t_web'
verbose_name = _(u"Web link")
verbose_name_plural = _(u"Web links")
ordering = ['name']
def __unicode__(self):
category = "%s - " % self.category.label if self.category else ""
return u"%s%s (%s)" % (category, self.name, self.url)
@classmethod
@models.permalink
def get_add_url(cls):
return ('trekking:weblink_add', )

class WebLinkCategory(PictogramMixin):
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')

    class Meta:
        db_table = 'o_b_web_category'
        verbose_name = _(u"Web link category")
        verbose_name_plural = _(u"Web link categories")
        ordering = ['label']

    def __unicode__(self):
        return u"%s" % self.label

class POIManager(models.GeoManager):
    def get_queryset(self):
        return super(POIManager, self).get_queryset().select_related('type', 'structure')

class POI(StructureRelated, PicturesMixin, PublishableMixin, MapEntityMixin, Topology):
    topo_object = models.OneToOneField(Topology, parent_link=True,
                                       db_column='evenement')
    description = models.TextField(verbose_name=_(u"Description"), db_column='description',
                                   help_text=_(u"History, details, ..."))
    type = models.ForeignKey('POIType', related_name='pois', verbose_name=_(u"Type"), db_column='type')
    eid = models.CharField(verbose_name=_(u"External id"), max_length=128, blank=True, db_column='id_externe')

    class Meta:
        db_table = 'o_t_poi'
        verbose_name = _(u"POI")
        verbose_name_plural = _(u"POI")

    # Override default manager
    objects = Topology.get_manager_cls(POIManager)()

    def __unicode__(self):
        return u"%s (%s)" % (self.name, self.type)

    @models.permalink
    def get_document_public_url(self):
        """ Override ``geotrek.common.mixins.PublishableMixin``
        """
        return ('trekking:poi_document_public', [], {'lang': get_language(), 'pk': self.pk, 'slug': self.slug})

    def save(self, *args, **kwargs):
        super(POI, self).save(*args, **kwargs)
        # Invalidate treks map
        for trek in self.treks.all():
            try:
                os.remove(trek.get_map_image_path())
            except OSError:
                pass

    @property
    def type_display(self):
        return unicode(self.type)

    @property
    def serializable_type(self):
        return {'label': self.type.label,
                'pictogram': self.type.get_pictogram_url()}

    @classmethod
    def path_pois(cls, path):
        return cls.objects.existing().filter(aggregations__path=path).distinct('pk')

    @classmethod
    def topology_pois(cls, topology):
        if settings.TREKKING_TOPOLOGY_ENABLED:
            qs = cls.overlapping(topology)
        else:
            area = topology.geom.buffer(settings.TREK_POI_INTERSECTION_MARGIN)
            qs = cls.objects.existing().filter(geom__intersects=area)
        return qs

    @classmethod
    def published_topology_pois(cls, topology):
        return cls.topology_pois(topology).filter(published=True)

    def distance(self, to_cls):
        return settings.TOURISM_INTERSECTION_MARGIN
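
# Hedged note on ``POI.topology_pois`` above (usage is illustrative, not part
# of the original module): when TREKKING_TOPOLOGY_ENABLED is set, POIs are
# looked up through topology overlap; otherwise a plain geometric intersection
# against a buffered geometry is used.
#
#   >>> POI.topology_pois(trek)            # POIs related to a given topology
#   >>> POI.published_topology_pois(trek)  # same queryset, published only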
Path.add_property('pois', POI.path_pois, _(u"POIs"))
Topology.add_property('pois', POI.topology_pois, _(u"POIs"))
Topology.add_property('published_pois', POI.published_topology_pois, _(u"Published POIs"))
Intervention.add_property('pois', lambda self: self.topology.pois if self.topology else [], _(u"POIs"))
Project.add_property('pois', lambda self: self.edges_by_attr('pois'), _(u"POIs"))
tourism_models.TouristicContent.add_property('pois', lambda self: intersecting(POI, self), _(u"POIs"))
tourism_models.TouristicContent.add_property('published_pois', lambda self: intersecting(POI, self).filter(published=True), _(u"Published POIs"))
tourism_models.TouristicEvent.add_property('pois', lambda self: intersecting(POI, self), _(u"POIs"))
tourism_models.TouristicEvent.add_property('published_pois', lambda self: intersecting(POI, self).filter(published=True), _(u"Published POIs"))
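
# Hedged sketch of the reverse properties registered above (object names are
# illustrative assumptions): ``add_property`` exposes the POI lookups as
# read-only attributes on the related models.
#
#   >>> path.pois                  # POIs attached to a path
#   >>> topology.published_pois    # published POIs around a topology
#   >>> touristic_event.pois       # POIs intersecting a touristic event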

class POIType(PictogramMixin):
    label = models.CharField(verbose_name=_(u"Label"), max_length=128, db_column='nom')
    cirkwi = models.ForeignKey('cirkwi.CirkwiPOICategory', verbose_name=_(u"Cirkwi POI category"), null=True, blank=True)

    class Meta:
        db_table = 'o_b_poi'
        verbose_name = _(u"POI type")
        verbose_name_plural = _(u"POI types")
        ordering = ['label']

    def __unicode__(self):
        return self.label