import datetime
import logging
import os
import sys
import json
from dateutil.parser import parse
import multiprocessing
import ast
import random
# cloud
from pymongo import MongoClient
# kafka
from kafka import KafkaConsumer, KafkaProducer
from kafka.errors import KafkaError
from volttron.platform.vip.agent import Agent, Core, PubSub
from volttron.platform.messaging import topics
from volttron.platform.agent import utils
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = "0.2"
# refer to the agent creation walkthrough
# link : http://volttron.readthedocs.io/en/4.0.1/devguides/agent_development/Agent-Development.html
# refer to the example agent
# link : http://volttron.readthedocs.io/en/4.0.1/devguides/agent_development/Agent-Configuration-Store.html#example-agent
def cloud_agent(config_path, **kwargs):
'''
Function: Return CloudAgent object with configuration information
Args: Same with Class Args
Returns: CloudAgent object
Note: None
Created: JinhoSon, 2017-04-14
Deleted: .
'''
# get config information
config = utils.load_config(config_path)
source = config.get('source')
destination_ip = config.get('destination_ip')
destination_port = config.get('destination_port')
services_topic_list = config.get('services_topic_list')
database_name = config.get('database_name')
collection_name = config.get('collection_name')
command_topic = config.get('command_topic')
cloud_broker_ip = config.get('cloud_broker_ip')
cloud_broker_port = config.get('cloud_broker_port')
cloud_producer_topic = config.get('cloud_producer_topic')
cloud_consumer_topic = config.get('cloud_consumer_topic')
if 'all' in services_topic_list:
services_topic_list = [topics.DRIVER_TOPIC_BASE, topics.LOGGER_BASE,
topics.ACTUATOR, topics.ANALYSIS_TOPIC_BASE]
return CloudAgent(source,
destination_ip,
destination_port,
services_topic_list,
database_name,
collection_name,
command_topic,
cloud_broker_ip,
cloud_broker_port,
cloud_producer_topic,
cloud_consumer_topic,
**kwargs)
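# Example configuration (an illustrative sketch only; the values are
# placeholders, not defaults shipped with the agent). The keys match the
# config.get() calls above; 'command_topic' mirrors the 'command-to-cloud'
# topic mentioned in the docstrings below.
#
# {
#     "source": "zone-1",
#     "destination_ip": "127.0.0.1",
#     "destination_port": "27017",
#     "services_topic_list": ["all"],
#     "database_name": "volttron",
#     "collection_name": "device_data",
#     "command_topic": "command-to-cloud",
#     "cloud_broker_ip": "127.0.0.1",
#     "cloud_broker_port": "9092",
#     "cloud_producer_topic": "cloud-to-volttron",
#     "cloud_consumer_topic": "volttron-to-cloud"
# }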
class CloudAgent(Agent):
'''
----------------------------------------------------------------------------
Agent summary
----------------------------------------------------------------------------
Name: CloudAgent
Version: 0.2
Function:
1. Subscribe data from message bus
2. Send device data to Cloud(MongoDB)
3. Send command history to Cloud(MongoDB)
4. Send message(command) to Cloud(Kafka consumer)
5. Receive message(command) from Cloud(Kafka producer)
6. Publish data to message bus(test for command)
7. Command to device point using RPC
Args:
source (str): zone name
destination_ip (str): MongoDB server ip in Cloud
destination_port (str): MongoDB server port in Cloud
services_topic_list (list): Topics whose data is sent to the MongoDB server in the Cloud
database_name (str): MongoDB database name
collection_name (str): MongoDB collection name (like a table)
command_topic (str): Topic that other agents publish to on the MessageBus
when sending a command to CloudAgent
cloud_broker_ip (str): Kafka Broker ip in Cloud
cloud_broker_port (str): Kafka Broker port in Cloud
cloud_producer_topic (str): Topic for messaging(commanding) from Cloud to VOLTTRON
cloud_consumer_topic (str): Topic for messaging(commanding) from VOLTTRON to Cloud
Returns:
None
Note:
Version 0.1: Add - Function 1, 2
Version 0.2: Add - Function 3, 4, 5, 6, 7
'''
'''
History
=====
Create '__init__' (by JinhoSon, 2017-04-14)
Create 'post_data' (by SungonLee, 2017-04-14)
Create 'on_message_topic' (by SungonLee, 2017-04-20)
Create 'subscriber' (by SungonLee, 2017-04-20)
Create 'actuate_something' (by SungonLee, 2017-07-20)
Create 'publish_command' (by SungonLee, 2017-09-10)
Create 'command_to_cloud' (by SungonLee, 2017-09-10)
Modify '__init__' (by SungonLee, 2017-09-20)
Create 'command_to_cloud_' (by SungonLee, 2017-09-20)
Delete 'command_to_cloud_' (by SungonLee, 2017-09-23)
'''
def __init__(self, source,
destination_ip,
destination_port,
services_topic_list,
database_name,
collection_name,
command_topic,
cloud_broker_ip,
cloud_broker_port,
cloud_producer_topic,
cloud_consumer_topic,
**kwargs):
'''
Function:
1. Initializing the configuration information
2. Create Connection with MongoDB server, Kafka Consumer, Kafka Producer in Cloud
Args: Same with Class Args
Returns: None
Note:
self.connection: connection with MongoDB in Cloud
self.consumer: connection with kafka consumer in Cloud
self.producer: connection with kafka producer in Cloud
Created: JinhoSon, 2017-04-14
Modified: SungonLee, 2017-09-20
Deleted: .
'''
super(CloudAgent, self).__init__(**kwargs)
# set config info
self.source = source
self.destination_ip = destination_ip
self.destination_port = destination_port
self.services_topic_list = services_topic_list
self.database_name = database_name
self.collection_name = collection_name
self.command_topic = command_topic
self.cloud_broker_ip = cloud_broker_ip
self.cloud_broker_port = cloud_broker_port
self.cloud_producer_topic = cloud_producer_topic
self.cloud_consumer_topic = cloud_consumer_topic
self.default_config = {"source": source,
"destination_ip": destination_ip,
"destination_port": destination_port,
"services_topic_list": services_topic_list,
"database_name": database_name,
"collection_name": collection_name,
"command_topic": command_topic,
"cloud_broker_ip": cloud_broker_ip,
"cloud_broker_port": cloud_broker_port,
"cloud_producer_topic": cloud_producer_topic,
"cloud_consumer_topic": cloud_consumer_topic
}
_log.info('default_config: {}'.format(self.default_config))
self.vip.config.set_default("config", self.default_config)
# setting up callback_method for configuration store interface
self.vip.config.subscribe(self.configure_new, actions="NEW", pattern="cloud/*")
self.vip.config.subscribe(self.configure_update, actions=["UPDATE",], pattern="cloud/*")
self.vip.config.subscribe(self.configure_delete, actions=["DELETE",], pattern="cloud/*")
self.new_value_ = 0
# connect with local(or remote) mongodb
self.connection = MongoClient(self.destination_ip, int(self.destination_port))
self.db = self.connection[str(self.database_name)]
self.collection = self.db[str(self.collection_name)]
# kafka
self.cloud_producer_addr = '{0}:{1}'.format(self.cloud_broker_ip, self.cloud_broker_port)
self.consumer = KafkaConsumer(bootstrap_servers=[self.cloud_producer_addr])
self.consumer.subscribe([self.cloud_producer_topic])
# kafka producer - command from VOLTTRON to Cloud
# produce json messages
self.cloud_consumer_addr = '{0}:{1}'.format(self.cloud_broker_ip, self.cloud_broker_port)
self.producer = KafkaProducer(bootstrap_servers=[self.cloud_consumer_addr],
value_serializer=lambda v: json.dumps(v).encode('utf-8')
)
# configuration callbacks
# link : http://volttron.readthedocs.io/en/4.0.1/devguides/agent_development/Agent-Configuration-Store.html
# Ensure that we use default values for anything missing in the configuration
def configure_new(self, config_name, action, contents):
_log.debug("configure_new")
config = self.default_config.copy()
config.update(contents)
# update cloud agent config
def configure_update(self, config_name, action, contents):
_log.debug("configure_update")
# delete cloud agent config
def configure_delete(self, config_name, action, contents):
_log.debug("configure_delete")
def post_data(self, peer=None, sender=None, bus=None, topic=None, headers=None, message=None):
'''
Function: Send device data to Cloud(MongoDB).
Args:
peer: the ZMQ identity of the bus owner
sender: identity of agent publishing messages to messagebus
bus:
topic: the full message topic
headers: case-insensitive dictionary (mapping) of message headers
message: possibly empty list of message parts
Returns: None
Note:
callback method for subscribing.
Subscribed message topics: actuator, record, datalogger and device topics; their data is sent to MongoDB (Cloud or local)
Created: JinhoSon, 2017-04-14
Modified: SungonLee, 2017-5-20
Deleted: .
'''
try:
_log.info('Post_data: subscribe from message bus, topic:{0}, message:{1}, sender:{2}, bus:{3}'
.format(topic, message, sender, bus))
post = {
'author': 'volttron.cloudagent',
'source': self.source,
'date': str(datetime.datetime.now()),
'topic': topic,
'headers': headers,
'message': message,
}
post_id = self.collection.insert(post)
_log.debug('mongodb insertion success topic : {}, message : {}'
.format(topic, message))
except Exception as e:
_log.error('Post_data: {}'.format(e))
def command_to_cloud(self, peer, sender, bus, topic, headers, message):
'''
Function:
Send Command to Cloud.
Send Command history to Cloud(MongoDB).
Args: Same with 'post_data'
Returns: None
Note:
Callback method for subscribing.
Subscribed message topic: 'command-to-cloud'. Sends the command to the Cloud:
producer(CloudAgent) -> kafka broker(Cloud) -> consumer(Cloud)
Created: SungonLee, 2017-09-10
Deleted: .
'''
try:
_log.info('Command_to_cloud: subscribe from message bus, topic:{0}, message:{1}, sender:{2}, bus:{3}'
.format(topic, message, sender, bus))
new_value = message[0]
msg = {'from': 'CloudAgent', 'to': 'Cloud',
'message': 'message from VOLTTRON to Cloud', 'new_value': new_value}
# Send command to Consumer(in Cloud)
self.producer.send(self.cloud_consumer_topic, msg)
# Send command data to MongoDB(in Cloud)
self.post_data(topic=self.cloud_consumer_topic, message=msg)
except Exception as e:
_log.error('Command_to_cloud: {}'.format(e))
@Core.receiver("onstart")
def on_message_topic(self, sender, **kwargs):
'''
Function: Register callback methods for sending data (device data, command history) to the Cloud (MongoDB).
Args: .
Returns: None
Note:
This method is executed after '__init__' method.
Subscribes to the platform message bus on the actuator, record, datalogger, and device topics.
Created: JinhoSon, 2017-04-14
Modified: SungonLee, 2017-05-20
Deleted: .
'''
_log.debug("sender {}, Kwargs {}".format(sender, kwargs))
# Define helper for registering a callback method
def subscriber(subscription, callback_method):
'''
Args:
subscription: topic (e.g. "devices/fake-campus/fake-building/fake-device/PowerState")
callback_method: method to register
Note:
callback_method: 'post_data', 'command_to_cloud'
'''
_log.debug("Subscribing to topic : {}".format(subscription))
self.vip.pubsub.subscribe(peer='pubsub',
prefix=subscription,
callback=callback_method)
# Register callback methods with 'subscriber'
for topic_subscriptions in self.services_topic_list:
subscriber(topic_subscriptions, self.post_data)
subscriber(self.command_topic, self.command_to_cloud)
@Core.periodic(1)
def actuate_something(self):
'''
Function:
Receive message(command) from Cloud(Kafka broker).
Use RPC to set device point value with message information.
Args: None
Returns: None
Note: None
Created: SungonLee, 2017-07-20
Modified: SungonLee, 2017-09-20
Deleted: .
'''
# poll() returns a dict keyed by TopicPartition (a namedtuple)
# if timeout_ms is 0, immediately check whether any messages are waiting in the broker
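# Illustrative sketch of the expected record value (a stringified dict that
# ast.literal_eval can parse; the point path and value below are made up):
# "{'device_point': 'fake-campus/fake-building/fake-device/PowerState', 'new_value': 1}"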
partition = self.consumer.poll(timeout_ms=0, max_records=None)
try:
if len(partition) > 0:
for p in partition:
for response in partition[p]:
# convert string to dictionary
response_dict = ast.literal_eval(response.value)
_log.info('Actuate_something: Receive message from cloud message: {}, new_value: {}'
.format(response_dict, response_dict['new_value']))
new_value = response_dict['new_value']
device_point = response_dict['device_point']
# Use RPC to get point-value in device
result = self.vip.rpc.call(
'platform.actuator',
'get_point',
device_point
).get(timeout=10)
_log.info("Actuate_something: Reading Before commmand: {}".format(result))
# Use RPC to set point-value in device
result = self.vip.rpc.call(
'platform.actuator',
'set_point',
self.core.identity,
device_point,
new_value,
).get(timeout=10)
_log.info("Actuate_something: Reading After command: {}".format(result))
# Send command data to MongoDB(in Cloud)
msg = {'from': 'Cloud',
'to':'CloudAgent',
'message': 'message from Cloud to VOLTTRON',
'device_point': device_point,
'new_value': new_value}
self.post_data(topic=self.cloud_producer_topic, message=msg)
else:
_log.info('Actuate_something: No command received from cloud')
except Exception as e:
_log.error('Actuate_something: {}'.format(e))
@Core.periodic(5)
def publish_command(self):
'''
Function:
Publish message(command) to the MessageBus(VOLTTRON);
CloudAgent then subscribes to this message from the MessageBus
and sends the message(command) on to the Cloud.
Args: .
Returns: None
Note:
Test method for publishing example message to MessageBus.
Publish message(command) to MessageBus(VOLTTRON) with topic in config file 'command_topic'.
The publishing period can be changed (currently 5s).
Created: SungonLee, 2017-09-20
Deleted: .
'''
try:
# Create time, message, value info
headers = {
'date': str(datetime.datetime.now())
}
message = [
self.new_value_,
{
'message': 'message VOLTTRON to Cloud',
'new_value': self.new_value_,
}
]
self.new_value_ += 1
topic = self.command_topic
self.vip.pubsub.publish('pubsub', topic, headers, message)
_log.info('Publish_command: publish to message bus, topic:{0}, new_value_:{1}, message:{2}'
.format(topic, self.new_value_, 'message VOLTTRON to Cloud'))
except Exception as e:
_log.error('Publish_command: {}'.format(e))
# @Core.periodic(5)
# def command_to_cloud_(self):
# '''
# Function:
# Send message(command) to Cloud(Kafka broker)
#
# Args: .
#
# Returns: None
#
# Note:
# Test method for sending example message to Cloud.
# Period for sending message can be exchanged(current 5s).
#
# Created: SungonLee, 2017-09-20
# Deleted: SungonLee, 2017-09-23
# '''
# try:
# new_value = random.randrange(200, 300)
# msg = {'title': 'cloud-title', 'message': 'volttron_to_cloud', 'new_value': new_value}
# # j_msg = json.dumps(msg)
# # print('mag: {}\nj_msg: {}\n\n'.format(msg, j_msg))
# _log.info('Command msg: {}\n'.format(msg))
# # sent('topic', value)
# self.producer.send('cloud-topic', msg)
#
# except Exception as e:
# _log.error('Command_to_cloud: {}'.format(e))
def main(argv=sys.argv):
'''Main method called to start the agent.'''
utils.vip_main(cloud_agent, identity='cloudagent',
version=__version__)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
|
import threading
from unittest import mock
from mopidy import backend as backend_api
import spotify
from mopidy_spotify import backend, library, playback, playlists
def get_backend(config, session_mock=None):
obj = backend.SpotifyBackend(config=config, audio=None)
if session_mock:
obj._session = session_mock
else:
obj._session = mock.Mock()
obj._web_client = mock.Mock()
obj._event_loop = mock.Mock()
return obj
def test_uri_schemes(spotify_mock, config):
backend = get_backend(config)
assert "spotify" in backend.uri_schemes
def test_init_sets_up_the_providers(spotify_mock, config):
backend = get_backend(config)
assert isinstance(backend.library, library.SpotifyLibraryProvider)
assert isinstance(backend.library, backend_api.LibraryProvider)
assert isinstance(backend.playback, playback.SpotifyPlaybackProvider)
assert isinstance(backend.playback, backend_api.PlaybackProvider)
assert isinstance(backend.playlists, playlists.SpotifyPlaylistsProvider)
assert isinstance(backend.playlists, backend_api.PlaylistsProvider)
def test_init_disables_playlists_provider_if_not_allowed(spotify_mock, config):
config["spotify"]["allow_playlists"] = False
backend = get_backend(config)
assert backend.playlists is None
def test_on_start_creates_configured_session(tmp_path, spotify_mock, config):
cache_location_mock = mock.PropertyMock()
settings_location_mock = mock.PropertyMock()
config_mock = spotify_mock.Config.return_value
type(config_mock).cache_location = cache_location_mock
type(config_mock).settings_location = settings_location_mock
get_backend(config).on_start()
spotify_mock.Config.assert_called_once_with()
config_mock.load_application_key_file.assert_called_once_with(mock.ANY)
cache_location_mock.assert_called_once_with(
bytes(tmp_path / "cache" / "spotify")
)
settings_location_mock.assert_called_once_with(
bytes(tmp_path / "data" / "spotify")
)
spotify_mock.Session.assert_called_once_with(config_mock)
def test_on_start_disallows_network_if_config_is_set(spotify_mock, config):
session = spotify_mock.Session.return_value
allow_network_mock = mock.PropertyMock()
type(session.connection).allow_network = allow_network_mock
config["spotify"]["allow_network"] = False
get_backend(config).on_start()
allow_network_mock.assert_called_once_with(False)
def test_on_start_configures_preferred_bitrate(spotify_mock, config):
session = spotify_mock.Session.return_value
preferred_bitrate_mock = mock.PropertyMock()
type(session).preferred_bitrate = preferred_bitrate_mock
config["spotify"]["bitrate"] = 320
get_backend(config).on_start()
preferred_bitrate_mock.assert_called_once_with(spotify.Bitrate.BITRATE_320k)
def test_on_start_configures_volume_normalization(spotify_mock, config):
session = spotify_mock.Session.return_value
volume_normalization_mock = mock.PropertyMock()
type(session).volume_normalization = volume_normalization_mock
config["spotify"]["volume_normalization"] = False
get_backend(config).on_start()
volume_normalization_mock.assert_called_once_with(False)
def test_on_start_configures_proxy(spotify_mock, web_mock, config):
config["proxy"] = {
"scheme": "https",
"hostname": "my-proxy.example.com",
"port": 8080,
"username": "alice",
"password": "s3cret",
}
spotify_config = spotify_mock.Config.return_value
backend = get_backend(config)
backend.on_start()
assert spotify_config.proxy == "https://my-proxy.example.com:8080"
assert spotify_config.proxy_username == "alice"
assert spotify_config.proxy_password == "s3cret"
web_mock.SpotifyOAuthClient.assert_called_once_with(
client_id=mock.ANY,
client_secret=mock.ANY,
proxy_config=config["proxy"],
)
def test_on_start_configures_web_client(spotify_mock, web_mock, config):
config["spotify"]["client_id"] = "1234567"
config["spotify"]["client_secret"] = "AbCdEfG"
backend = get_backend(config)
backend.on_start()
web_mock.SpotifyOAuthClient.assert_called_once_with(
client_id="1234567",
client_secret="AbCdEfG",
proxy_config=mock.ANY,
)
def test_on_start_adds_connection_state_changed_handler_to_session(
spotify_mock, config
):
session = spotify_mock.Session.return_value
get_backend(config).on_start()
session.on.assert_any_call(
spotify_mock.SessionEvent.CONNECTION_STATE_UPDATED,
backend.on_connection_state_changed,
backend.SpotifyBackend._logged_in,
backend.SpotifyBackend._logged_out,
mock.ANY,
)
def test_on_start_adds_play_token_lost_handler_to_session(spotify_mock, config):
session = spotify_mock.Session.return_value
obj = get_backend(config)
obj.on_start()
session.on.assert_any_call(
spotify_mock.SessionEvent.PLAY_TOKEN_LOST,
backend.on_play_token_lost,
mock.ANY,
)
def test_on_start_starts_the_pyspotify_event_loop(spotify_mock, config):
backend = get_backend(config)
backend.on_start()
spotify_mock.EventLoop.assert_called_once_with(backend._session)
spotify_mock.EventLoop.return_value.start.assert_called_once_with()
def test_on_start_logs_in(spotify_mock, web_mock, config):
backend = get_backend(config)
backend.on_start()
spotify_mock.Session.return_value.login.assert_called_once_with(
"alice", "password"
)
web_mock.SpotifyOAuthClient.return_value.login.assert_called_once()
def test_on_start_refreshes_playlists(spotify_mock, web_mock, config, caplog):
backend = get_backend(config)
backend.on_start()
client_mock = web_mock.SpotifyOAuthClient.return_value
client_mock.get_user_playlists.assert_called_once()
assert "Refreshed 0 Spotify playlists" in caplog.text
assert backend.playlists._loaded
def test_on_start_doesnt_refresh_playlists_if_not_allowed(
spotify_mock, web_mock, config, caplog
):
config["spotify"]["allow_playlists"] = False
backend = get_backend(config)
backend.on_start()
client_mock = web_mock.SpotifyOAuthClient.return_value
client_mock.get_user_playlists.assert_not_called()
assert "Refreshed 0 playlists" not in caplog.text
def test_on_stop_logs_out_and_waits_for_logout_to_complete(
spotify_mock, config, caplog
):
backend = get_backend(config)
backend._logged_out = mock.Mock()
backend.on_stop()
assert "Logging out of Spotify" in caplog.text
backend._session.logout.assert_called_once_with()
backend._logged_out.wait.assert_called_once_with()
backend._event_loop.stop.assert_called_once_with()
def test_on_connection_state_changed_when_logged_out(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.LOGGED_OUT
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock
)
assert "Logged out of Spotify" in caplog.text
assert not logged_in_event.is_set()
assert logged_out_event.is_set()
def test_on_connection_state_changed_when_logged_in(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.LOGGED_IN
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock
)
assert "Logged in to Spotify in online mode" in caplog.text
assert logged_in_event.is_set()
assert not logged_out_event.is_set()
actor_mock.on_logged_in.assert_called_once_with()
def test_on_connection_state_changed_when_disconnected(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.DISCONNECTED
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock
)
assert "Disconnected from Spotify" in caplog.text
def test_on_connection_state_changed_when_offline(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.OFFLINE
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock
)
assert "Logged in to Spotify in offline mode" in caplog.text
assert logged_in_event.is_set()
assert not logged_out_event.is_set()
def test_on_logged_in_event_activates_private_session(
spotify_mock, config, caplog
):
session_mock = spotify_mock.Session.return_value
private_session_mock = mock.PropertyMock()
type(session_mock.social).private_session = private_session_mock
config["spotify"]["private_session"] = True
backend = get_backend(config, session_mock)
backend.on_logged_in()
assert "Spotify private session activated" in caplog.text
private_session_mock.assert_called_once_with(True)
def test_on_play_token_lost_messages_the_actor(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_play_token_lost(session_mock, actor_mock)
assert "Spotify play token lost" in caplog.text
actor_mock.on_play_token_lost.assert_called_once_with()
def test_on_play_token_lost_event_when_playing(spotify_mock, config, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.player.state = spotify_mock.PlayerState.PLAYING
backend = get_backend(config, session_mock)
backend.playback = mock.Mock(spec=playback.SpotifyPlaybackProvider)
backend.on_play_token_lost()
assert (
"Spotify has been paused because your account is "
"being used somewhere else." in caplog.text
)
backend.playback.pause.assert_called_once_with()
def test_on_play_token_lost_event_when_not_playing(
spotify_mock, config, caplog
):
session_mock = spotify_mock.Session.return_value
session_mock.player.state = spotify_mock.PlayerState.UNLOADED
backend = get_backend(config, session_mock)
backend.playback = mock.Mock(spec=playback.SpotifyPlaybackProvider)
backend.on_play_token_lost()
assert "Spotify has been paused" not in caplog.text
assert backend.playback.pause.call_count == 0
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PHP devappserver2 runtime."""
import base64
import cStringIO
import httplib
import logging
import os
import subprocess
import sys
import time
import urllib
import google
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import php
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import wsgi_server
SDK_PATH = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), 'php/sdk'))
SETUP_PHP_PATH = os.path.join(os.path.dirname(php.__file__), 'setup.php')
class PHPRuntime(object):
"""A WSGI application that runs PHP scripts using the PHP CGI binary."""
def __init__(self, config):
logging.debug('Initializing runtime with %s', config)
self.config = config
self.environ_template = {
'APPLICATION_ID': str(config.app_id),
'CURRENT_VERSION_ID': str(config.version_id),
'DATACENTER': str(config.datacenter),
'INSTANCE_ID': str(config.instance_id),
'APPENGINE_RUNTIME': 'php',
'AUTH_DOMAIN': str(config.auth_domain),
'HTTPS': 'off',
# By default php-cgi does not allow .php files to be run directly so
# REDIRECT_STATUS must be set. See:
# http://php.net/manual/en/security.cgi-bin.force-redirect.php
'REDIRECT_STATUS': '1',
'REMOTE_API_PORT': str(config.api_port),
'SERVER_SOFTWARE': http_runtime_constants.SERVER_SOFTWARE,
'TZ': 'UTC',
}
self.environ_template.update((env.key, env.value) for env in config.environ)
def __call__(self, environ, start_response):
"""Handles an HTTP request for the runtime using a PHP executable.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
user_environ = self.environ_template.copy()
self.copy_headers(environ, user_environ)
user_environ['REQUEST_METHOD'] = environ.get('REQUEST_METHOD', 'GET')
user_environ['PATH_INFO'] = environ['PATH_INFO']
user_environ['QUERY_STRING'] = environ['QUERY_STRING']
# Construct the partial URL that PHP expects for REQUEST_URI
# (http://php.net/manual/en/reserved.variables.server.php) using part of
# the process described in PEP-333
# (http://www.python.org/dev/peps/pep-0333/#url-reconstruction).
user_environ['REQUEST_URI'] = urllib.quote(user_environ['PATH_INFO'])
if user_environ['QUERY_STRING']:
user_environ['REQUEST_URI'] += '?' + user_environ['QUERY_STRING']
# Modify the SCRIPT_FILENAME to specify the setup script that readies the
# PHP environment. Put the user script in REAL_SCRIPT_FILENAME.
user_environ['REAL_SCRIPT_FILENAME'] = environ[
http_runtime_constants.SCRIPT_HEADER]
user_environ['SCRIPT_FILENAME'] = SETUP_PHP_PATH
user_environ['REMOTE_REQUEST_ID'] = environ[
http_runtime_constants.REQUEST_ID_ENVIRON]
# Pass the APPLICATION_ROOT so we can use it in the setup script. We will
# remove it from the environment before we execute the user script.
user_environ['APPLICATION_ROOT'] = self.config.application_root
if 'CONTENT_TYPE' in environ:
user_environ['CONTENT_TYPE'] = environ['CONTENT_TYPE']
user_environ['HTTP_CONTENT_TYPE'] = environ['CONTENT_TYPE']
if 'CONTENT_LENGTH' in environ:
user_environ['CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
user_environ['HTTP_CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
content = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
else:
content = None
# On Windows, in order to run a side-by-side assembly the specified env
# must include a valid SystemRoot.
if 'SYSTEMROOT' in os.environ:
user_environ['SYSTEMROOT'] = os.environ['SYSTEMROOT']
# See http://www.php.net/manual/en/ini.core.php#ini.include-path.
include_paths = [self.config.application_root, SDK_PATH]
if sys.platform == 'win32':
# See https://bugs.php.net/bug.php?id=46034 for quoting requirements.
include_path = 'include_path="%s"' % ';'.join(include_paths)
else:
include_path = 'include_path=%s' % ':'.join(include_paths)
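# For illustration only: with an application root of '/srv/app' and an SDK
# path of '/srv/php/sdk' (hypothetical paths), this yields
# 'include_path=/srv/app:/srv/php/sdk' on POSIX and, with the quoting
# required on Windows, 'include_path="C:\app;C:\php\sdk"'.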
args = [self.config.php_config.php_executable_path, '-d', include_path]
if self.config.php_config.enable_debugger:
args.extend(['-d', 'xdebug.remote_enable="1"'])
user_environ['XDEBUG_CONFIG'] = os.environ.get('XDEBUG_CONFIG', '')
try:
p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=user_environ,
cwd=self.config.application_root)
stdout, stderr = p.communicate(content)
except Exception as e:
logging.exception('Failure to start PHP with: %s', args)
start_response('500 Internal Server Error',
[(http_runtime_constants.ERROR_CODE_HEADER, '1')])
return ['Failure to start the PHP subprocess with %r:\n%s' % (args, e)]
if p.returncode:
logging.error('php failure (%r) with:\nstdout:\n%sstderr:\n%s',
p.returncode, stdout, stderr)
start_response('500 Internal Server Error',
[(http_runtime_constants.ERROR_CODE_HEADER, '1')])
return ['php failure (%r) with:\nstdout:%s\nstderr:\n%s' %
(p.returncode, stdout, stderr)]
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
assert 'Content-Type' in message, 'invalid CGI response: %r' % stdout
if 'Status' in message:
status = message['Status']
del message['Status']
else:
status = '200 OK'
# Ensures that we avoid merging repeat headers into a single header,
# allowing use of multiple Set-Cookie headers.
headers = []
for name in message:
for value in message.getheaders(name):
headers.append((name, value))
start_response(status, headers)
return [message.fp.read()]
def copy_headers(self, source_environ, dest_environ):
"""Copy headers from source_environ to dest_environ.
This extracts headers that represent environ values and propagates all
other headers which are not used for internal implementation details or
headers that are stripped.
Args:
source_environ: The source environ dict.
dest_environ: The environ dict to populate.
"""
# TODO: This method is copied from python/runtime.py. If this
# method isn't obsoleted, consider moving it to some sort of utility module.
for env in http_runtime_constants.ENVIRONS_TO_PROPAGATE:
value = source_environ.get(
http_runtime_constants.INTERNAL_ENVIRON_PREFIX + env, None)
if value is not None:
dest_environ[env] = value
for name, value in source_environ.items():
if (name.startswith('HTTP_') and
not name.startswith(http_runtime_constants.INTERNAL_ENVIRON_PREFIX)):
dest_environ[name] = value
def main():
config = runtime_config_pb2.Config()
config.ParseFromString(base64.b64decode(sys.stdin.read()))
server = wsgi_server.WsgiServer(
('localhost', 0),
request_rewriter.runtime_rewriter_middleware(PHPRuntime(config)))
server.start()
print server.port
sys.stdout.close()
sys.stdout = sys.stderr
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
server.quit()
if __name__ == '__main__':
main()
|
|
"""Python STIX2 Memory Source/Sink"""
import io
import itertools
import json
import os
from stix2 import v20, v21
from stix2.base import _STIXBase
from stix2.datastore import DataSink, DataSource, DataStoreMixin
from stix2.datastore.filters import FilterSet, apply_common_filters
from stix2.parsing import parse
def _add(store, stix_data, allow_custom=True, version=None):
"""Add STIX objects to MemoryStore/Sink.
Adds STIX objects to an in-memory dictionary for fast lookup.
Recursive function, breaks down STIX Bundles and lists.
Args:
store: A MemoryStore, MemorySink or MemorySource object.
stix_data (list OR dict OR STIX object): STIX objects to be added
allow_custom (bool): Whether to allow custom properties as well as unknown
custom objects. Note that unknown custom objects cannot be parsed
into STIX objects, and will be returned as-is. Default: True.
version (str): Which STIX2 version to lock the parser to. (e.g. "2.0",
"2.1"). If None, the library makes the best effort to figure
out the spec representation of the object.
"""
if isinstance(stix_data, list):
# STIX objects are in a list- recurse on each object
for stix_obj in stix_data:
_add(store, stix_obj, allow_custom, version)
elif stix_data["type"] == "bundle":
# adding a json bundle - so just grab STIX objects
for stix_obj in stix_data.get("objects", []):
_add(store, stix_obj, allow_custom, version)
else:
# Adding a single non-bundle object
if isinstance(stix_data, _STIXBase):
stix_obj = stix_data
else:
stix_obj = parse(stix_data, allow_custom, version)
# Map ID to an _ObjectFamily if the object is versioned, so we can track
# multiple versions. Otherwise, map directly to the object. All
# versioned objects should have a "modified" property.
if "modified" in stix_obj:
if stix_obj["id"] in store._data:
obj_family = store._data[stix_obj["id"]]
else:
obj_family = _ObjectFamily()
store._data[stix_obj["id"]] = obj_family
obj_family.add(stix_obj)
else:
store._data[stix_obj["id"]] = stix_obj
class _ObjectFamily(object):
"""
An internal implementation detail of memory sources/sinks/stores.
Represents a "family" of STIX objects: all objects with a particular
ID. (I.e. all versions.) The latest version is also tracked so that it
can be obtained quickly.
"""
def __init__(self):
self.all_versions = {}
self.latest_version = None
def add(self, obj):
self.all_versions[obj["modified"]] = obj
if (
self.latest_version is None or
obj["modified"] > self.latest_version["modified"]
):
self.latest_version = obj
def __str__(self):
return "<<{}; latest={}>>".format(
self.all_versions,
self.latest_version["modified"],
)
def __repr__(self):
return str(self)
class MemoryStore(DataStoreMixin):
"""Interface to an in-memory dictionary of STIX objects.
MemoryStore is a wrapper around a paired MemorySink and MemorySource.
Note: It doesn't make sense to create a MemoryStore by passing
in existing MemorySource and MemorySink because there could
be data concurrency issues. Also, it is just as easy to create a new MemoryStore.
Args:
stix_data (list OR dict OR STIX object): STIX content to be added
allow_custom (bool): whether to allow custom STIX content.
Only applied when export/input functions called, i.e.
load_from_file() and save_to_file(). Defaults to True.
Attributes:
_data (dict): the in-memory dict that holds STIX objects
source (MemorySource): MemorySource
sink (MemorySink): MemorySink
"""
def __init__(self, stix_data=None, allow_custom=True, version=None):
self._data = {}
if stix_data:
_add(self, stix_data, allow_custom, version)
super(MemoryStore, self).__init__(
source=MemorySource(stix_data=self._data, allow_custom=allow_custom, version=version, _store=True),
sink=MemorySink(stix_data=self._data, allow_custom=allow_custom, version=version, _store=True),
)
def save_to_file(self, *args, **kwargs):
"""Write SITX objects from in-memory dictionary to JSON file, as a STIX
Bundle. If a directory is given, the Bundle 'id' will be used as
filename. Otherwise, the provided value will be used.
Args:
path (str): file path to write STIX data to.
encoding (str): The file encoding. Default utf-8.
"""
return self.sink.save_to_file(*args, **kwargs)
def load_from_file(self, *args, **kwargs):
"""Load STIX data from JSON file.
File format is expected to be a single JSON STIX object or JSON STIX
bundle.
Args:
path (str): file path to load STIX data from
"""
return self.source.load_from_file(*args, **kwargs)
class MemorySink(DataSink):
"""Interface for adding/pushing STIX objects to an in-memory dictionary.
Designed to be paired with a MemorySource, together as the two
components of a MemoryStore.
Args:
stix_data (dict OR list): valid STIX 2.0 content in
bundle or a list.
_store (bool): whether the MemorySink is a part of a MemoryStore,
in which case "stix_data" is a direct reference to
shared memory with DataSource. Not user supplied
allow_custom (bool): whether to allow custom objects/properties
when exporting STIX content to file.
Default: True.
version (str): If present, it forces the parser to use the version
provided. Otherwise, the library will make the best effort based
on checking the "spec_version" property.
Attributes:
_data (dict): the in-memory dict that holds STIX objects.
If part of a MemoryStore, the dict is shared with a MemorySource
"""
def __init__(self, stix_data=None, allow_custom=True, version=None, _store=False):
super(MemorySink, self).__init__()
self.allow_custom = allow_custom
if _store:
self._data = stix_data
else:
self._data = {}
if stix_data:
_add(self, stix_data, allow_custom, version)
def add(self, stix_data, version=None):
_add(self, stix_data, self.allow_custom, version)
add.__doc__ = _add.__doc__
def save_to_file(self, path, encoding="utf-8"):
path = os.path.abspath(path)
all_objs = list(
itertools.chain.from_iterable(
value.all_versions.values() if isinstance(value, _ObjectFamily)
else [value]
for value in self._data.values()
),
)
if any("spec_version" in x for x in all_objs):
bundle = v21.Bundle(all_objs, allow_custom=self.allow_custom)
else:
bundle = v20.Bundle(all_objs, allow_custom=self.allow_custom)
if path.endswith(".json"):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
else:
if not os.path.exists(path):
os.makedirs(path)
# if the user only provided a directory, use the bundle id for filename
path = os.path.join(path, bundle["id"] + ".json")
with io.open(path, "w", encoding=encoding) as f:
bundle = bundle.serialize(pretty=True, encoding=encoding, ensure_ascii=False)
f.write(bundle)
return path
save_to_file.__doc__ = MemoryStore.save_to_file.__doc__
class MemorySource(DataSource):
"""Interface for searching/retrieving STIX objects from an in-memory
dictionary.
Designed to be paired with a MemorySink, together as the two
components of a MemoryStore.
Args:
stix_data (dict OR list OR STIX object): valid STIX 2.0 content in
bundle or list.
_store (bool): if the MemorySource is a part of a MemoryStore,
in which case "stix_data" is a direct reference to shared
memory with DataSink. Not user supplied
allow_custom (bool): whether to allow custom objects/properties
when importing STIX content from file.
Default: True.
version (str): If present, it forces the parser to use the version
provided. Otherwise, the library will make the best effort based
on checking the "spec_version" property.
Attributes:
_data (dict): the in-memory dict that holds STIX objects.
If part of a MemoryStore, the dict is shared with a MemorySink
"""
def __init__(self, stix_data=None, allow_custom=True, version=None, _store=False):
super(MemorySource, self).__init__()
self.allow_custom = allow_custom
if _store:
self._data = stix_data
else:
self._data = {}
if stix_data:
_add(self, stix_data, allow_custom, version)
def get(self, stix_id, _composite_filters=None):
"""Retrieve STIX object from in-memory dict via STIX ID.
Args:
stix_id (str): The STIX ID of the STIX object to be retrieved.
_composite_filters (FilterSet): collection of filters passed from the parent
CompositeDataSource, not user supplied
Returns:
(STIX object): STIX object that has the supplied ID.
"""
stix_obj = None
mapped_value = self._data.get(stix_id)
if mapped_value:
if isinstance(mapped_value, _ObjectFamily):
stix_obj = mapped_value.latest_version
else:
stix_obj = mapped_value
if stix_obj:
all_filters = list(
itertools.chain(
_composite_filters or [],
self.filters,
),
)
stix_obj = next(apply_common_filters([stix_obj], all_filters), None)
return stix_obj
def all_versions(self, stix_id, _composite_filters=None):
"""Retrieve STIX objects from in-memory dict via STIX ID, all versions
of it.
Args:
stix_id (str): The STIX ID of the STIX 2 object to retrieve.
_composite_filters (FilterSet): collection of filters passed from
the parent CompositeDataSource, not user supplied
Returns:
(list): list of STIX objects that have the supplied ID.
"""
results = []
mapped_value = self._data.get(stix_id)
if mapped_value:
if isinstance(mapped_value, _ObjectFamily):
stix_objs_to_filter = mapped_value.all_versions.values()
else:
stix_objs_to_filter = [mapped_value]
all_filters = list(
itertools.chain(
_composite_filters or [],
self.filters,
),
)
results.extend(
apply_common_filters(stix_objs_to_filter, all_filters),
)
return results
def query(self, query=None, _composite_filters=None):
"""Search and retrieve STIX objects based on the complete query.
A "complete query" includes the filters from the query, the filters
attached to this MemorySource, and any filters passed from a
CompositeDataSource (i.e. _composite_filters).
Args:
query (list): list of filters to search on
_composite_filters (FilterSet): collection of filters passed from
the CompositeDataSource, not user supplied
Returns:
(list): list of STIX objects that match the supplied query.
"""
query = FilterSet(query)
# combine all query filters
if self.filters:
query.add(self.filters)
if _composite_filters:
query.add(_composite_filters)
all_objs = itertools.chain.from_iterable(
value.all_versions.values() if isinstance(value, _ObjectFamily)
else [value]
for value in self._data.values()
)
# Apply STIX common property filters.
all_data = list(apply_common_filters(all_objs, query))
return all_data
def load_from_file(self, file_path, version=None, encoding='utf-8'):
with io.open(os.path.abspath(file_path), "r", encoding=encoding) as f:
stix_data = json.load(f)
_add(self, stix_data, self.allow_custom, version)
load_from_file.__doc__ = MemoryStore.load_from_file.__doc__
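# A minimal usage sketch (assuming "indicator" is a valid parsed STIX object
# or dict; the file path is illustrative):
#
#     from stix2.datastore.memory import MemoryStore
#
#     store = MemoryStore()
#     store.add(indicator)                        # delegates to _add() above
#     latest = store.get(indicator["id"])         # latest version via _ObjectFamily
#     versions = store.all_versions(indicator["id"])
#     path = store.save_to_file("/tmp/bundles")   # directory -> "<bundle id>.json"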
|
|
"""
GFA1 Serializer for nodes, edges, Subgraphs and networkx graphs.
Can serialize any one of the objects from the groups mentioned
above, or a dictionary with equivalent keys.
"""
import copy
import logging
import networkx as nx
from pygfa.graph_element.parser import field_validator as fv
from pygfa.serializer import utils
class GFA1SerializationError(Exception): pass
serializer_logger = logging.getLogger(__name__)
DEFAULT_IDENTIFIER = "no identifier given."
SEGMENT_FIELDS = [fv.GFA1_NAME, fv.GFA1_SEQUENCE]
LINK_FIELDS = [\
fv.GFA1_NAME, \
fv.GFA1_ORIENTATION, \
fv.GFA1_NAME, \
fv.GFA1_ORIENTATION, \
fv.GFA1_CIGAR]
CONTAINMENT_FIELDS = [\
fv.GFA1_NAME, \
fv.GFA1_ORIENTATION, \
fv.GFA1_NAME, \
fv.GFA1_ORIENTATION, \
fv.GFA1_INT, \
fv.GFA1_CIGAR]
PATH_FIELDS = [\
fv.GFA1_NAME, \
fv.GFA1_NAMES, \
fv.GFA1_CIGARS]
################################################################################
# NODE SERIALIZER
################################################################################
def serialize_node(node_, identifier=DEFAULT_IDENTIFIER):
"""Serialize to the GFA1 specification a Graph Element Node or a
dictionary that holds the same information.
:param node_: A Graph Element Node or a dictionary.
:param identifier: If set, helps provide useful debug information.
:return "": If the object cannot be serialized to GFA.
"""
identifier = utils._check_identifier(identifier)
try:
if isinstance(node_, dict):
node_dict = copy.deepcopy(node_)
defined_fields = [ \
node_dict.pop('nid'), \
node_dict.pop('sequence') \
]
node_dict.pop('slen')
fields = ["S"]
fields.append(str(node_['nid']))
fields.append(str(node_['sequence']))
if node_['slen'] != None:
fields.append("LN:i:" + str(node_['slen']))
fields.extend(utils._serialize_opt_fields(node_dict))
else:
defined_fields = [ \
node_.nid, \
node_.sequence
]
fields = ["S"]
fields.append(str(node_.nid))
fields.append(str(node_.sequence))
if node_.slen != None:
fields.append("LN:i:" + str(node_.slen))
fields.extend(utils._serialize_opt_fields(node_.opt_fields))
if not utils._are_fields_defined(defined_fields) or \
not utils._check_fields(fields[1:], SEGMENT_FIELDS):
raise GFA1SerializationError("Required node elements " \
+ "missing or invalid.")
return str.join("\t", fields)
except (KeyError, AttributeError, GFA1SerializationError) as e:
serializer_logger.debug(utils._format_exception(identifier, e))
return ""
################################################################################
# EDGE SERIALIZER
################################################################################
def serialize_edge(edge_, identifier=DEFAULT_IDENTIFIER):
"""Converts to a GFA1 line the given edge.
Fragments and Gaps cannot be represented in GFA1 specification,
so they are not serialized.
"""
identifier = utils._check_identifier(identifier)
try:
if isinstance(edge_, dict):
if edge_['eid'] is None: # edge_ is a fragment
raise GFA1SerializationError("Cannot serialize Fragment " \
+ "to GFA1.")
elif edge_['distance'] != None or \
edge_['variance'] != None: # edge_ is a gap
raise GFA1SerializationError("Cannot serialize GAP " \
+ "to GFA1.")
elif 'pos' in edge_: # edge_ is a containment
return _serialize_to_containment(edge_, identifier)
elif edge_['is_dovetail'] is True:
return _serialize_to_link(edge_, identifier)
else:
raise GFA1SerializationError("Cannot convert an " \
+ "internal edge to a Link")
else:
if edge_.eid is None: # edge_ is a fragment
raise GFA1SerializationError("Cannot serialize Fragment " \
+ "to GFA1.")
elif edge_.distance != None or \
edge_.variance != None: # edge_ is a gap
raise GFA1SerializationError("Cannot serialize GAP " \
+ "to GFA1.")
elif 'pos' in edge_.opt_fields: # edge_ is a containment
return _serialize_to_containment(edge_)
elif edge_.is_dovetail is True:
return _serialize_to_link(edge_)
else:
raise GFA1SerializationError("Cannot convert an " \
+ "internal edge to a Link")
except (KeyError, AttributeError, GFA1SerializationError) as e:
serializer_logger.debug(utils._format_exception(identifier, e))
return ""
def _serialize_to_containment(containment_, identifier=DEFAULT_IDENTIFIER):
identifier = utils._check_identifier(identifier)
try:
if isinstance(containment_, dict):
containment_dict = copy.deepcopy(containment_)
utils._remove_common_edge_fields(containment_dict)
containment_dict.pop('pos')
defined_fields = [ \
containment_['from_node'], \
containment_['from_orn'], \
containment_['to_node'], \
containment_['to_orn'], \
containment_['alignment'], \
containment_['pos'].value
]
fields = ["C"]
fields.append(str(containment_['from_node']))
fields.append(str(containment_['from_orn']))
fields.append(str(containment_['to_node']))
fields.append(str(containment_['to_orn']))
fields.append(str(containment_['pos'].value))
if fv.is_gfa1_cigar(containment_['alignment']):
fields.append(str(containment_['alignment']))
else:
fields.append("*")
if not containment_['eid'] in(None, '*'):
fields.append("ID:Z:" + str(containment_['eid']))
fields.extend(utils._serialize_opt_fields(containment_dict))
else:
defined_fields = [ \
containment_.from_node, \
containment_.from_orn, \
containment_.to_node, \
containment_.to_orn, \
containment_.alignment, \
containment_.opt_fields['pos'].value \
]
fields = ["C"]
opt_fields = copy.deepcopy(containment_.opt_fields)
opt_fields.pop('pos')
fields.append(str(containment_.from_node))
fields.append(str(containment_.from_orn))
fields.append(str(containment_.to_node))
fields.append(str(containment_.to_orn))
fields.append(str(containment_.opt_fields['pos'].value))
if fv.is_gfa1_cigar(containment_.alignment):
fields.append(str(containment_.alignment))
else:
fields.append("*")
if not containment_.eid in(None, '*'):
fields.append("ID:Z:" + str(containment_.eid))
fields.extend(utils._serialize_opt_fields(opt_fields))
if not utils._are_fields_defined(defined_fields) or \
not utils._check_fields(fields[1:], CONTAINMENT_FIELDS):
raise GFA1SerializationError()
return str.join("\t", fields)
except(KeyError, AttributeError, GFA1SerializationError) as e:
serializer_logger.debug(utils._format_exception(identifier, e))
return ""
def _serialize_to_link(link_, identifier=DEFAULT_IDENTIFIER):
identifier = utils._check_identifier(identifier)
try:
if isinstance(link_, dict):
link_dict = copy.deepcopy(link_)
utils._remove_common_edge_fields(link_dict)
defined_fields = [ \
link_['from_node'], \
link_['from_orn'], \
link_['to_node'], \
link_['to_orn'], \
link_['alignment'] \
]
fields = ["L"]
fields.append(str(link_['from_node']))
fields.append(str(link_['from_orn']))
fields.append(str(link_['to_node']))
fields.append(str(link_['to_orn']))
if fv.is_gfa1_cigar(link_['alignment']):
fields.append(str(link_['alignment']))
else:
fields.append("*")
if not link_['eid'] in(None, '*'):
fields.append("ID:Z:" + str(link_['eid']))
fields.extend(utils._serialize_opt_fields(link_dict))
else:
defined_fields = [ \
link_.from_node, \
link_.from_orn, \
link_.to_node, \
link_.to_orn, \
link_.alignment \
]
fields = ["L"]
fields.append(str(link_.from_node))
fields.append(str(link_.from_orn))
fields.append(str(link_.to_node))
fields.append(str(link_.to_orn))
if fv.is_gfa1_cigar(link_.alignment):
fields.append(str(link_.alignment))
else:
fields.append("*")
if not link_.eid in(None, '*'):
fields.append("ID:Z:" + str(link_.eid))
fields.extend(utils._serialize_opt_fields(link_.opt_fields))
if not utils._are_fields_defined(defined_fields) or \
not utils._check_fields(fields[1:], LINK_FIELDS):
raise GFA1SerializationError()
return str.join("\t", fields)
except(KeyError, AttributeError, GFA1SerializationError) as e:
serializer_logger.debug(utils._format_exception(identifier, e))
return ""
################################################################################
# SUBGRAPH SERIALIZER
################################################################################
def point_to_node(gfa_, node_id):
"""Check if the given node_id point to a node in the gfa graph.
"""
return gfa_.nodes(identifier = node_id) != None
def _serialize_subgraph_elements(subgraph_elements, gfa_=None):
"""Serialize the elements belonging to a subgraph.
Check if the orientation is provided for each element of the
subgraph.
If gfa is provided, each element can be checked to determine whether it
is a node or another element of the GFA graph.
Only nodes (segments) will be (and could be) serialized
to elements of the Path.
If a gfa graph is not provided there cannot be any control
over nodes, and the data will be processed as is.
:param subgraph_elements: The elements of a Graph Element Subgraph.
:param gfa_: The GFA object that contains the subgraph.
"""
elements = []
for id_, orientation in subgraph_elements.items():
if gfa_ is None:
if orientation != None:
elements.append(str(id_) + str(orientation))
else:
if orientation != None \
and point_to_node(gfa_, id_):
elements.append(str(id_) + str(orientation))
return str.join(",", elements)
def serialize_subgraph(subgraph_, identifier=DEFAULT_IDENTIFIER, gfa_=None):
"""Serialize a Subgraph object or an equivalent dictionary.
"""
identifier = utils._check_identifier(identifier)
try:
if isinstance(subgraph_, dict):
subgraph_dict = copy.deepcopy(subgraph_)
defined_fields = [\
subgraph_dict.pop('sub_id'), \
subgraph_dict.pop('elements') \
]
fields = ["P"]
fields.append(subgraph_['sub_id'])
fields.append(_serialize_subgraph_elements(subgraph_['elements'], gfa_))
if 'overlaps' in subgraph_:
subgraph_dict.pop('overlaps')
fields.append(str.join(",", subgraph_['overlaps'].value))
else:
fields.append("*")
fields.extend(utils._serialize_opt_fields(subgraph_dict))
else:
defined_fields = [\
subgraph_.sub_id, \
subgraph_.elements \
]
opt_fields = copy.deepcopy(subgraph_.opt_fields)
fields = ["P"]
fields.append(subgraph_.sub_id)
fields.append(_serialize_subgraph_elements(subgraph_.elements, gfa_))
if 'overlaps' in subgraph_.opt_fields:
opt_fields.pop('overlaps')
fields.append(str.join(",", subgraph_.opt_fields['overlaps'].value))
else:
fields.append("*")
fields.extend(utils._serialize_opt_fields(opt_fields))
if not utils._are_fields_defined(defined_fields) or \
not utils._check_fields(fields[1:], PATH_FIELDS):
raise GFA1SerializationError("Required fields missing or" \
+ " not valid.")
return str.join("\t", fields)
except(KeyError, AttributeError, GFA1SerializationError) as e:
serializer_logger.debug(utils._format_exception(identifier, e))
return ""
################################################################################
# SERIALIZE GRAPH
################################################################################
def serialize_graph(graph, write_header=True):
"""Serialize a networkx.MulitGraph object.
:param graph: A networkx.MultiGraph instance.
:write_header: If set to True put a GFA1 header as first line.
"""
if not isinstance(graph, nx.MultiGraph):
raise ValueError("The object to serialize must be an instance " \
+ "of a networkx.MultiGraph.")
string_serialize = "H\tVN:Z:1.0\n" if write_header else ""
for node_id, node in graph.nodes(data=True):
node_serialize = serialize_node(node, node_id)
if len(node_serialize) > 0:
string_serialize += node_serialize + "\n"
for from_node, to_node, key in graph.edges(keys=True):
edge_serialize = serialize_edge(graph.get_edge_data(from_node, to_node, key), key)
if len(edge_serialize) > 0:
string_serialize += edge_serialize + "\n"
return string_serialize
def serialize_gfa(gfa_):
"""Serialize a GFA object into a GFA1 file.
"""
gfa_serialize = serialize_graph(gfa_._graph, write_header=True)
for sub_id, subgraph_ in gfa_.subgraphs().items():
subgraph_serialize = serialize_subgraph(subgraph_, sub_id, gfa_)
if len(subgraph_serialize) > 0:
gfa_serialize += subgraph_serialize + "\n"
return gfa_serialize
if __name__ == '__main__': # pragma: no cover
pass
|
|
"""Representation of Z-Wave binary sensors."""
import logging
from typing import Callable, List, Optional, TypedDict
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import CommandClass
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_CLIENT, DATA_UNSUBSCRIBE, DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .entity import ZWaveBaseEntity
LOGGER = logging.getLogger(__name__)
NOTIFICATION_SMOKE_ALARM = 1
NOTIFICATION_CARBON_MONOOXIDE = 2
NOTIFICATION_CARBON_DIOXIDE = 3
NOTIFICATION_HEAT = 4
NOTIFICATION_WATER = 5
NOTIFICATION_ACCESS_CONTROL = 6
NOTIFICATION_HOME_SECURITY = 7
NOTIFICATION_POWER_MANAGEMENT = 8
NOTIFICATION_SYSTEM = 9
NOTIFICATION_EMERGENCY = 10
NOTIFICATION_CLOCK = 11
NOTIFICATION_APPLIANCE = 12
NOTIFICATION_HOME_HEALTH = 13
NOTIFICATION_SIREN = 14
NOTIFICATION_WATER_VALVE = 15
NOTIFICATION_WEATHER = 16
NOTIFICATION_IRRIGATION = 17
NOTIFICATION_GAS = 18
class NotificationSensorMapping(TypedDict, total=False):
"""Represent a notification sensor mapping dict type."""
type: int # required
states: List[str]
device_class: str
enabled: bool
# Mappings for Notification sensors
# https://github.com/zwave-js/node-zwave-js/blob/master/packages/config/config/notifications.json
NOTIFICATION_SENSOR_MAPPINGS: List[NotificationSensorMapping] = [
{
# NotificationType 1: Smoke Alarm - State Id's 1 and 2 - Smoke detected
"type": NOTIFICATION_SMOKE_ALARM,
"states": ["1", "2"],
"device_class": DEVICE_CLASS_SMOKE,
},
{
# NotificationType 1: Smoke Alarm - All other State Id's
"type": NOTIFICATION_SMOKE_ALARM,
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 2: Carbon Monoxide - State Id's 1 and 2
"type": NOTIFICATION_CARBON_MONOOXIDE,
"states": ["1", "2"],
"device_class": DEVICE_CLASS_GAS,
},
{
# NotificationType 2: Carbon Monoxide - All other State Id's
"type": NOTIFICATION_CARBON_MONOOXIDE,
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 3: Carbon Dioxide - State Id's 1 and 2
"type": NOTIFICATION_CARBON_DIOXIDE,
"states": ["1", "2"],
"device_class": DEVICE_CLASS_GAS,
},
{
# NotificationType 3: Carbon Dioxide - All other State Id's
"type": NOTIFICATION_CARBON_DIOXIDE,
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 4: Heat - State Id's 1, 2, 5, 6 (heat/underheat)
"type": NOTIFICATION_HEAT,
"states": ["1", "2", "5", "6"],
"device_class": DEVICE_CLASS_HEAT,
},
{
# NotificationType 4: Heat - All other State Id's
"type": NOTIFICATION_HEAT,
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 5: Water - State Id's 1, 2, 3, 4
"type": NOTIFICATION_WATER,
"states": ["1", "2", "3", "4"],
"device_class": DEVICE_CLASS_MOISTURE,
},
{
# NotificationType 5: Water - All other State Id's
"type": NOTIFICATION_WATER,
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 6: Access Control - State Id's 1, 2, 3, 4 (Lock)
"type": NOTIFICATION_ACCESS_CONTROL,
"states": ["1", "2", "3", "4"],
"device_class": DEVICE_CLASS_LOCK,
},
{
# NotificationType 6: Access Control - State Id 22 (door/window open)
"type": NOTIFICATION_ACCESS_CONTROL,
"states": ["22"],
"device_class": DEVICE_CLASS_DOOR,
},
{
# NotificationType 6: Access Control - State Id 23 (door/window closed)
"type": NOTIFICATION_ACCESS_CONTROL,
"states": ["23"],
"enabled": False,
},
{
# NotificationType 7: Home Security - State Id's 1, 2 (intrusion)
"type": NOTIFICATION_HOME_SECURITY,
"states": ["1", "2"],
"device_class": DEVICE_CLASS_SAFETY,
},
{
# NotificationType 7: Home Security - State Id's 3, 4, 9 (tampering)
"type": NOTIFICATION_HOME_SECURITY,
"states": ["3", "4", "9"],
"device_class": DEVICE_CLASS_SAFETY,
},
{
# NotificationType 7: Home Security - State Id's 5, 6 (glass breakage)
"type": NOTIFICATION_HOME_SECURITY,
"states": ["5", "6"],
"device_class": DEVICE_CLASS_SAFETY,
},
{
# NotificationType 7: Home Security - State Id's 7, 8 (motion)
"type": NOTIFICATION_HOME_SECURITY,
"states": ["7", "8"],
"device_class": DEVICE_CLASS_MOTION,
},
{
# NotificationType 9: System - State Id's 1, 2, 6, 7
"type": NOTIFICATION_SYSTEM,
"states": ["1", "2", "6", "7"],
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 10: Emergency - State Id's 1, 2, 3
"type": NOTIFICATION_EMERGENCY,
"states": ["1", "2", "3"],
"device_class": DEVICE_CLASS_PROBLEM,
},
{
# NotificationType 14: Siren
"type": NOTIFICATION_SIREN,
"states": ["1"],
"device_class": DEVICE_CLASS_SOUND,
},
{
# NotificationType 18: Gas
"type": NOTIFICATION_GAS,
"states": ["1", "2", "3", "4"],
"device_class": DEVICE_CLASS_GAS,
},
{
# NotificationType 18: Gas
"type": NOTIFICATION_GAS,
"states": ["6"],
"device_class": DEVICE_CLASS_PROBLEM,
},
]
PROPERTY_DOOR_STATUS = "doorStatus"
class PropertySensorMapping(TypedDict, total=False):
"""Represent a property sensor mapping dict type."""
property_name: str # required
on_states: List[str] # required
device_class: str
enabled: bool
# Mappings for property sensors
PROPERTY_SENSOR_MAPPINGS: List[PropertySensorMapping] = [
{
"property_name": PROPERTY_DOOR_STATUS,
"on_states": ["open"],
"device_class": DEVICE_CLASS_DOOR,
"enabled": True,
},
]
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: Callable
) -> None:
"""Set up Z-Wave binary sensor from config entry."""
client: ZwaveClient = hass.data[DOMAIN][config_entry.entry_id][DATA_CLIENT]
@callback
def async_add_binary_sensor(info: ZwaveDiscoveryInfo) -> None:
"""Add Z-Wave Binary Sensor."""
entities: List[BinarySensorEntity] = []
if info.platform_hint == "notification":
# Get all sensors from Notification CC states
for state_key in info.primary_value.metadata.states:
# ignore idle key (0)
if state_key == "0":
continue
entities.append(
ZWaveNotificationBinarySensor(config_entry, client, info, state_key)
)
elif info.platform_hint == "property":
entities.append(ZWavePropertyBinarySensor(config_entry, client, info))
else:
# boolean sensor
entities.append(ZWaveBooleanBinarySensor(config_entry, client, info))
async_add_entities(entities)
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(
hass,
f"{DOMAIN}_{config_entry.entry_id}_add_{BINARY_SENSOR_DOMAIN}",
async_add_binary_sensor,
)
)
class ZWaveBooleanBinarySensor(ZWaveBaseEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor."""
def __init__(
self,
config_entry: ConfigEntry,
client: ZwaveClient,
info: ZwaveDiscoveryInfo,
) -> None:
"""Initialize a ZWaveBooleanBinarySensor entity."""
super().__init__(config_entry, client, info)
self._name = self.generate_name(include_value_name=True)
@property
def is_on(self) -> Optional[bool]:
"""Return if the sensor is on or off."""
if self.info.primary_value.value is None:
return None
return bool(self.info.primary_value.value)
@property
def device_class(self) -> Optional[str]:
"""Return device class."""
if self.info.primary_value.command_class == CommandClass.BATTERY:
return DEVICE_CLASS_BATTERY
return None
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
if self.info.primary_value.command_class == CommandClass.SENSOR_BINARY:
# Legacy binary sensors are phased out (replaced by notification sensors)
# Disable by default to not confuse users
if self.info.node.device_class.generic != "Binary Sensor":
return False
return True
class ZWaveNotificationBinarySensor(ZWaveBaseEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor from Notification CommandClass."""
def __init__(
self,
config_entry: ConfigEntry,
client: ZwaveClient,
info: ZwaveDiscoveryInfo,
state_key: str,
) -> None:
"""Initialize a ZWaveNotificationBinarySensor entity."""
super().__init__(config_entry, client, info)
self.state_key = state_key
self._name = self.generate_name(
include_value_name=True,
alternate_value_name=self.info.primary_value.property_name,
additional_info=[self.info.primary_value.metadata.states[self.state_key]],
)
# check if we have a custom mapping for this value
self._mapping_info = self._get_sensor_mapping()
@property
def is_on(self) -> Optional[bool]:
"""Return if the sensor is on or off."""
if self.info.primary_value.value is None:
return None
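# The Notification CC value holds the currently active state id, so this sensor is "on" only when that value matches its own state key.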
return int(self.info.primary_value.value) == int(self.state_key)
@property
def device_class(self) -> Optional[str]:
"""Return device class."""
return self._mapping_info.get("device_class")
@property
def unique_id(self) -> str:
"""Return unique id for this entity."""
return f"{super().unique_id}.{self.state_key}"
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
if not self._mapping_info:
return True
return self._mapping_info.get("enabled", True)
@callback
def _get_sensor_mapping(self) -> NotificationSensorMapping:
"""Try to get a device specific mapping for this sensor."""
for mapping in NOTIFICATION_SENSOR_MAPPINGS:
if (
mapping["type"]
!= self.info.primary_value.metadata.cc_specific["notificationType"]
):
continue
if not mapping.get("states") or self.state_key in mapping["states"]:
# match found
return mapping
return {}
class ZWavePropertyBinarySensor(ZWaveBaseEntity, BinarySensorEntity):
"""Representation of a Z-Wave binary_sensor from a property."""
def __init__(
self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
) -> None:
"""Initialize a ZWavePropertyBinarySensor entity."""
super().__init__(config_entry, client, info)
# check if we have a custom mapping for this value
self._mapping_info = self._get_sensor_mapping()
self._name = self.generate_name(include_value_name=True)
@property
def is_on(self) -> Optional[bool]:
"""Return if the sensor is on or off."""
if self.info.primary_value.value is None:
return None
return self.info.primary_value.value in self._mapping_info["on_states"]
@property
def device_class(self) -> Optional[str]:
"""Return device class."""
return self._mapping_info.get("device_class")
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# We hide some of the more advanced sensors by default to not overwhelm users;
# unless explicitly stated in a mapping, assume disabled by default
return self._mapping_info.get("enabled", False)
@callback
def _get_sensor_mapping(self) -> PropertySensorMapping:
"""Try to get a device specific mapping for this sensor."""
mapping_info = PropertySensorMapping()
for mapping in PROPERTY_SENSOR_MAPPINGS:
if mapping["property_name"] == self.info.primary_value.property_name:
mapping_info = mapping.copy()
break
return mapping_info
|
|
#!/usr/bin/env python
#
# Copyright (c) 2014 The pblcache Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import rrdtool
import os
import sys
if len(sys.argv) == 1:
print "%s <pblio stat file>" % sys.argv[0]
sys.exit(1)
pbliodata = sys.argv[1]
if not os.path.exists(pbliodata):
print "File %s does not exist" % pbliodata
sys.exit(1)
# Default input is every 5 secs
Period=60
# Convert from pblio.data -> pblio.csv
fp = open(pbliodata, 'r')
line = fp.readline()
jsondata = json.loads(line)
fp.close()
# Setup info
data_sources = ['DS:tlat_d:COUNTER:600:0:U',
'DS:tlat_c:COUNTER:600:0:U',
'DS:treadlat_d:COUNTER:600:0:U',
'DS:treadlat_c:COUNTER:600:0:U',
'DS:twritelat_d:COUNTER:600:0:U',
'DS:twritelat_c:COUNTER:600:0:U',
'DS:cache_insertions:COUNTER:600:0:U',
'DS:cache_evictions:COUNTER:600:0:U',
'DS:cache_hits:COUNTER:600:0:U',
'DS:cache_reads:COUNTER:600:0:U',
'DS:cache_ihits:COUNTER:600:0:U',
'DS:cache_invals:COUNTER:600:0:U',
'DS:reads:COUNTER:600:0:U',
'DS:total:COUNTER:600:0:U',
'DS:asu1_rl_d:COUNTER:600:0:U',
'DS:asu1_rl_c:COUNTER:600:0:U',
'DS:asu2_rl_d:COUNTER:600:0:U',
'DS:asu2_rl_c:COUNTER:600:0:U',
]
# Create db
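# Single archive: LAST consolidation (xff 0.5), one primary data point per row, 2600 rows retained at the 60-second step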
rrdtool.create('pblio.rrd',
'--start', "%d" % (jsondata['time']-Period),
'--step', '%d' % (Period),
data_sources,
'RRA:LAST:0.5:1:2600')
# Open JSON data file
fp = open(pbliodata, 'r')
# Cover data
start_time=jsondata['time']
end_time=0
cache_items=0
for line in fp.readlines():
stat = json.loads(line)
# Calculations
tlat_d = int(stat['spc']['total']['latency']['duration']/1000)
tlat_c = stat['spc']['total']['latency']['count']
treadlat_d = int(stat['spc']['read']['latency']['duration']/1000)
treadlat_c = stat['spc']['read']['latency']['count']
twritelat_d = int(stat['spc']['write']['latency']['duration']/1000)
twritelat_c = stat['spc']['write']['latency']['count']
reads = stat['spc']['asu'][0]['read']['blocks'] + stat['spc']['asu'][1]['read']['blocks']
total = stat['spc']['asu'][0]['total']['blocks'] + stat['spc']['asu'][1]['total']['blocks']
asu1_rl_d = int(stat['spc']['asu'][0]['read']['latency']['duration']/1000)
asu1_rl_c = stat['spc']['asu'][0]['read']['latency']['count']
asu2_rl_d = int(stat['spc']['asu'][1]['read']['latency']['duration']/1000)
asu2_rl_c = stat['spc']['asu'][1]['read']['latency']['count']
# Get cache
try:
cache_hits = stat['cache']['readhits']
cache_reads = stat['cache']['reads']
cache_ihits = stat['cache']['invalidatehits']
cache_invals = stat['cache']['invalidations']
cache_insertions = stat['cache']['insertions']
cache_evictions = stat['cache']['evictions']
except:
cache_hits = 0
cache_reads = 0
cache_ihits = 0
cache_invals = 0
cache_insertions = 0
cache_evictions = 0
# Enter into rrd
rrdtool.update('pblio.rrd',
("%d:" % stat['time']) +
("%d:" % tlat_d)+
("%d:" % tlat_c)+
("%d:" % treadlat_d)+
("%d:" % treadlat_c)+
("%d:" % twritelat_d)+
("%d:" % twritelat_c)+
("%d:" % cache_insertions)+
("%d:" % cache_evictions)+
("%d:" % cache_hits)+
("%d:" % cache_reads)+
("%d:" % cache_ihits)+
("%d:" % cache_invals)+
("%d:" % reads)+
("%d:" % total)+
("%d:" % asu1_rl_d)+
("%d:" % asu1_rl_c)+
("%d:" % asu2_rl_d)+
("%d" % asu2_rl_c))
# Save the end time for graphs
end_time = stat['time']
fp.close()
# Graph Total Latency
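# The CDEF expressions below are RRDtool RPN: average latency = duration / count, divided by 1000 to plot in milliseconds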
rrdtool.graph('tlat.png',
'--start', '%d' % start_time,
'--end', '%d' % end_time,
'-w 800',
'-h 400',
'--title=Total Latency',
'--vertical-label=Time (ms)',
'DEF:duration=pblio.rrd:tlat_d:LAST',
'DEF:count=pblio.rrd:tlat_c:LAST',
'DEF:readduration=pblio.rrd:treadlat_d:LAST',
'DEF:readcount=pblio.rrd:treadlat_c:LAST',
'DEF:writeduration=pblio.rrd:twritelat_d:LAST',
'DEF:writecount=pblio.rrd:twritelat_c:LAST',
'CDEF:latms=duration,count,/,1000,/',
'CDEF:readlatms=readduration,readcount,/,1000,/',
'CDEF:writelatms=writeduration,writecount,/,1000,/',
'LINE2:latms#FF0000:Total Latency',
'LINE2:readlatms#00FF00:Read Total Latency',
'LINE2:writelatms#0000FF:Write Total Latency')
# Graph ASU1 and ASU2 Read Latency
rrdtool.graph('asu_read_latency.png',
'--start', '%d' % start_time,
'--end', '%d' % end_time,
'-w 800',
'-h 400',
'--title=ASU Read Latency',
'--vertical-label=Time (ms)',
'DEF:asu1d=pblio.rrd:asu1_rl_d:LAST',
'DEF:asu1c=pblio.rrd:asu1_rl_c:LAST',
'DEF:asu2d=pblio.rrd:asu2_rl_d:LAST',
'DEF:asu2c=pblio.rrd:asu2_rl_c:LAST',
'CDEF:asu1l=asu1d,asu1c,/,1000,/',
'CDEF:asu2l=asu2d,asu2c,/,1000,/',
'LINE2:asu1l#FF0000:ASU1 Read Latency',
'LINE2:asu2l#00FF00:ASU2 Read Latency')
# Graph Read Hit Rate
rrdtool.graph('readhit.png',
'--start', '%d' % start_time,
'--end', '%d' % end_time,
'-w 800',
'-h 400',
'--title=Cache Read Hit Percentage',
'--vertical-label=Percentage',
'DEF:hits=pblio.rrd:cache_hits:LAST',
'DEF:reads=pblio.rrd:cache_reads:LAST',
'DEF:ihits=pblio.rrd:cache_ihits:LAST',
'DEF:invals=pblio.rrd:cache_invals:LAST',
'CDEF:readhit=hits,reads,/,100,*',
'CDEF:invalhit=ihits,invals,/,100,*',
'LINE2:invalhit#00FF00:Invalidate Hit',
'LINE2:readhit#FF0000:Cache Read Hit')
# Graph Read Percentage
rrdtool.graph('readp.png',
'--start', '%d' % start_time,
'--end', '%d' % end_time,
'-w 800',
'-h 400',
'--title=Pblio Read Percentage',
'--vertical-label=Percentage',
'DEF:total=pblio.rrd:total:LAST',
'DEF:reads=pblio.rrd:reads:LAST',
'CDEF:readp=reads,total,/,100,*',
'LINE2:readp#FF0000:Read Percentage')
# Graph cache I/O
rrdtool.graph('insertions.png',
'--start', '%d' % start_time,
'--end', '%d' % end_time,
'-w 800',
'-h 400',
'--title=Insertions, Evictions, and Invalidations',
'--vertical-label=Number of Blocks',
'DEF:evictions=pblio.rrd:cache_evictions:LAST',
'DEF:insertions=pblio.rrd:cache_insertions:LAST',
'DEF:invalidatehits=pblio.rrd:cache_ihits:LAST',
'LINE2:insertions#0FFF00:Insertions',
'LINE2:invalidatehits#000FFF:Invalidation Hits',
'LINE2:evictions#FFF000:Evictions')
# Storage IOPS Reduction
rrdtool.graph('iops_reduction.png',
'--start', '%d' % start_time,
'--end', '%d' % end_time,
'-w 800',
'-h 400',
'--title=Storage System Total IOPS Reduction',
'--vertical-label=Percentage',
'DEF:writes=pblio.rrd:cache_invals:LAST',
'DEF:reads=pblio.rrd:cache_reads:LAST',
'DEF:readhits=pblio.rrd:cache_hits:LAST',
'CDEF:reduction=readhits,reads,writes,+,/,100,*',
'LINE2:reduction#FF0000:Total IOPS Reduction Percentage')
print "Start Time = %d" % (start_time)
print "End Time = %d" % (end_time)
|
|
import sys
import math
Pi = 3.141592653
R = 8.3144621
dt = 0.00000000001
nsElements = 5
nsLength = 1.0
Bore = 0.14
Stroke = 0.14
PortArea = Pi*(Bore/4)**2
Compression = 6.7
Gamma = 7.0/5.0
Alpha = Gamma/(Gamma-1)
airMolMass = 0.02897
portLength = 0.01
RotationRate = 1000 * 0.104719755
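# 1000 rpm expressed in rad/s (1 rpm = 2*Pi/60 ~ 0.104719755 rad/s)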
timeToRotate = 2*Pi * RotationRate
CompressedVolume = ((Pi/4.0)*Bore**2*Stroke)/(Compression-1)
Volume = CompressedVolume
AtmosphericPressure = 101325.0
AtmosphericDensity = 1.225
Pressure = AtmosphericPressure
Density = AtmosphericDensity
Temperature = 300
Number = (Pressure*Volume)/(R*Temperature)
nsCoeff = dt/(2*nsLength/nsElements)
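# nsCoeff = dt / (2*dx) with dx = nsLength/nsElements; this coefficient scales the centred-difference density and momentum updates in the solver loop below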
Theta = 0
DistanceTravelled = 0
outFile = open("output.txt", "w")
outFile.write("Theta, Volume, Mass, Pressure, Density, Temperature, Number\n")
Mass = Volume * Density
outFile.write(str(Theta) + ", " + str(Volume) + ", " + str(Mass) + ", " + str(Pressure) + ", " + str(Density) + ", " + str(Temperature) + ", " + str(Number) +"\n")
#Set up ICs
VelocityField = []
PressureField = []
DensityField = []
numberFlow = []
numberField = []
MeshVolume = (PortArea * nsLength/nsElements)
StepLength = nsLength/float(nsElements)
for meshPoint in range(0, nsElements):
VelocityField.append(0)
PressureField.append(0)
DensityField.append(Density)
numberField.append(AtmosphericPressure*MeshVolume/(R*Temperature))
numberFlow.append(0)
timeElapsed = 0
i = 0
velFile = open("vel.txt", "w")
presFile = open("pressure.txt", "w")
while timeElapsed < timeToRotate:
i += 1
timeElapsed += dt
Theta += dt*RotationRate
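# Normalised piston displacement: 0 at Theta = 0 and 1 at Theta = Pi (half a revolution)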
DistanceTravelled = (1+math.sin(Theta-Pi/2.0))/2.0
Mass = Volume * Density
OldVolume = Volume
Volume = CompressedVolume + DistanceTravelled * Stroke * Pi * (Bore/2.0)**2
if Theta < Pi:
#Induction
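# Adiabatic update: P*V**Gamma is treated as constant across the step, so P_new = P_old*(V_new/V_old)**(-Gamma)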
Pressure = (Volume/OldVolume)**-Gamma * Pressure
Density = AtmosphericDensity
Temperature = Pressure * Volume/(Number * R)
#Solve NS
VelocityField[0] = VelocityField[1]
VelocityField[nsElements-1] = 0
NewVelField = []
NewDensField = []
#inner boundary condition
jin = DensityField[0]*VelocityField[0]
jinm1 = 0
jinp1 = DensityField[1]*VelocityField[1]
rhoin = DensityField[0]
Pinm1 = Pressure - AtmosphericPressure
uinm1 = 0
Pinp1 = PressureField[1]
uinp1 = VelocityField[1]
rhoip1n = nsCoeff*(jinm1-jinp1) + rhoin
jip1n = nsCoeff*((jinm1*uinm1+Pinm1) - (jinp1*uinp1+Pinp1)) + jin
# print("------------------------------")
# print("rho")
# print(rhoin)
# print("drho")
# print(nsCoeff * (jinm1-jinp1))
# print("j")
# print(jin)
# print("dj")
# print(nsCoeff*((jinm1*uinm1+Pinm1) - (jinp1*uinp1+Pinp1)))
# print("based on")
# print(nsCoeff, jinm1, uinm1, Pinm1, jinp1, uinp1, Pinp1)
NewDensField.append(rhoip1n)
NewVelField.append(jip1n/rhoip1n)
#internal points
for n in range(1, nsElements-1):
jin = DensityField[n]*VelocityField[n]
jinm1 = DensityField[n-1]*VelocityField[n-1]
jinp1 = DensityField[n+1]*VelocityField[n+1]
rhoin = DensityField[n]
Pinm1 = PressureField[n-1]
uinm1 = VelocityField[n-1]
Pinp1 = PressureField[n+1]
uinp1 = VelocityField[n+1]
rhoip1n = nsCoeff*(jinm1-jinp1) + rhoin
jip1n = nsCoeff*((jinm1*uinm1+Pinm1) - (jinp1*uinp1+Pinp1)) + jin
# print("------------------------------")
# print("rho")
# print(rhoin)
# print("drho")
# print(nsCoeff * (jinm1-jinp1))
# print("j")
# print(jin)
# print("dj")
# print(nsCoeff*((jinm1*uinm1+Pinm1) - (jinp1*uinp1+Pinp1)))
# print("based on")
# print(nsCoeff, jinm1, uinm1, Pinm1, jinp1, uinp1, Pinp1)
NewDensField.append(rhoip1n)
NewVelField.append(jip1n/rhoip1n)
#outer boundary condition
NewVelField.append(0)
NewDensField.append(AtmosphericDensity)
# update velocity and density via the Euler equations
VelocityField = NewVelField
DensityField = NewDensField
#update pressure and number fields
#inner boundary (into cylinder)
Number += Density*VelocityField[0]*PortArea/airMolMass
Pressure = Number*R*Temperature/Volume
for n in range(0, nsElements):
numberFlow[n] = DensityField[n]*VelocityField[n]*PortArea/airMolMass
# print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
# for n in range(0, nsElements):
# print(numberFlow[n])
for n in range(0, nsElements-1):
numberField[n] += (numberFlow[n]-numberFlow[n+1])
PressureField[n] = numberField[n]*R*300/MeshVolume - AtmosphericPressure
for meshPoint in range(0, nsElements):
presFile.write(str(PressureField[meshPoint]) + " ")
velFile.write(str(VelocityField[meshPoint]) + " ")
presFile.write("\n")
velFile.write("\n")
# print("===================================")
# for n in range(0, nsElements):
# print(VelocityField[n], DensityField[n], PressureField[n], numberField[n])
# try:
# input("press enter")
# except SyntaxError:
# pass
for n in range(0, nsElements-1):
if numberField[n] < 0:
print("cell has lost more than it had, restart with smaller timestep")
print(i, n)
outFile.close()
presFile.close()
velFile.close()
sys.exit()
elif Theta < 2*Pi:
presFile.close()
velFile.close()
#Compression
Pressure = (Volume/OldVolume)**-Gamma * Pressure
Temperature = Pressure * Volume/(Number * R)
Density = Mass / Volume
elif Theta < 3*Pi:
#Combustion/Expansion
Pressure = (Volume/OldVolume)**-Gamma * Pressure
Temperature = Pressure * Volume/(Number * R)
Density = Mass / Volume
else:
#Exhaust
Pressure = (Volume/OldVolume)**-Gamma * Pressure
Density = Mass / Volume
Temperature = Pressure * Volume/(Number * R)
#Put NS solver here
outFile.write(str(Theta) + ", " + str(Volume) + ", " + str(Mass) + ", " + str(Pressure) + ", " + str(Density) + ", " + str(Temperature) + ", " + str(Number) +"\n")
|
|
import unittest
import logging
import sys
import datetime
from decimal import Decimal
from tigershark.facade import f835
from tigershark.parsers import M835_4010_X091_A1
class TestParsed835(unittest.TestCase):
def setUp(self):
m = M835_4010_X091_A1.parsed_835
with open('tests/835-example-2.txt') as f:
parsed = m.unmarshall(f.read().strip())
self.f = f835.F835_4010(parsed)
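# The example 835 file carries two transaction sets, exposed as facades[0] and facades[1] in the assertions below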
## Header ##
def test_financial_information(self):
fi = self.f.facades[0].header.financial_information
self.assertEqual(fi.transaction_type,
('I', 'Remittance Information Only'))
self.assertEqual(fi.amount, Decimal('12345.67'))
self.assertEqual(fi.credit_or_debit, ('C', 'Credit'))
self.assertEqual(fi.payment_method,
('ACH', 'Automated Clearing House (ACH)'))
self.assertEqual(fi.payment_format,
('CCP', 'Cash Concentration/Disbursement Plus Addenda'))
self.assertEqual(fi.sender_aba_transit_routing_number, '999999999')
self.assertEqual(fi.sender_canadian_bank_branch_and_institution_number,
None)
self.assertEqual(fi.sender_account_type, ('DA', 'Demand Deposit'))
self.assertEqual(fi.sender_bank_account_number, '1234567890')
self.assertEqual(fi.sender_id, '1111111111')
self.assertEqual(fi.sender_supplemental_id, '000011111')
self.assertEqual(fi.receiver_aba_transit_routing_number, '222222222')
self.assertEqual(
fi.receiver_canadian_bank_branch_and_institution_number, None)
self.assertEqual(fi.receiver_account_type, ('DA', 'Demand Deposit'))
self.assertEqual(fi.receiver_bank_account_number, '3333333333')
self.assertEqual(fi.issue_date, datetime.date(2012, 03, 22))
def test_reassociation_trace_number(self):
rtn = self.f.facades[0].header.reassociation_trace_number
self.assertEqual(rtn.trace_type,
('1', 'Current Transaction Trace Numbers'))
self.assertEqual(rtn.check_or_eft_trace_number, '1QG11111111')
self.assertEqual(rtn.payer_id, '1111111111')
self.assertEqual(rtn.originating_company_supplemental_code,
'000011111')
rtn = self.f.facades[1].header.reassociation_trace_number
self.assertEqual(rtn.trace_type,
('1', 'Current Transaction Trace Numbers'))
self.assertEqual(rtn.check_or_eft_trace_number, '1QG11111112')
self.assertEqual(rtn.payer_id, '1111111112')
self.assertEqual(rtn.originating_company_supplemental_code,
'000011112')
def test_production_date(self):
pd = self.f.facades[0].header.production_date
self.assertEqual(pd.date, datetime.date(2012, 03, 19))
pd = self.f.facades[1].header.production_date
self.assertEqual(pd.date, datetime.date(2012, 03, 19))
## PAYER ##
def test_payer(self):
p = self.f.facades[0].payer
c = p.contact_details
self.assertEqual(c.name, 'UNITED HEALTHCARE INSURANCE COMPANY')
self.assertEqual(c.id_qualifier,
('XV', 'Health Care Financing Administration National Plan ID'))
self.assertEqual(c.id, '87726')
self.assertEqual(c.addr1, '9900 BREN ROAD')
self.assertEqual(c.city, 'MINNETONKA')
self.assertEqual(c.state, 'MN')
self.assertEqual(c.zip, '553430000')
self.assertEqual(c.contact_name, 'ATLANTA SERVICE CENTER')
self.assertEqual(c.contact_phone, '8778423210')
self.assertEqual(c.contact_phone_ext, None)
self.assertEqual(p.payer_id, '87726')
p = self.f.facades[1].payer
c = p.contact_details
self.assertEqual(c.name, 'ACME, INC. A WHOLLY OWNED SUBSIDIARY OF '\
'UNITED HEALTHCARE INSURANCE COMPANY')
self.assertEqual(c.id_qualifier,
('XV', 'Health Care Financing Administration National Plan ID'))
self.assertEqual(c.id, '87726')
self.assertEqual(c.addr1, '123 MAIN STREET')
self.assertEqual(c.city, 'ANYTOWN')
self.assertEqual(c.state, 'CA')
self.assertEqual(c.zip, '940660000')
self.assertEqual(p.payer_id, '87726')
## PAYEE ##
def test_payee(self):
def _test():
self.assertEqual(c.name, 'MY CLINIC')
self.assertEqual(c.id_qualifier,
('XX', 'Health Care Financing Administration National '\
'Provider ID'))
self.assertEqual(c.id, '1333333333')
self.assertEqual(c.addr1, '123 HEALTHCARE STREET')
self.assertEqual(c.city, 'SAN FRANCISCO')
self.assertEqual(c.zip, '94109')
self.assertEqual(p.tax_id, '777777777')
p = self.f.facades[0].payee
c = p.contact_details
_test()
p = self.f.facades[1].payee
c = p.contact_details
_test()
## Claims Overview ##
def test_claims_overview(self):
co = self.f.facades[0].claims_overview
self.assertEqual(co.number, '1')
self.assertEqual(co.provider_id, '1333333333')
self.assertEqual(co.facility_type_code, '81')
self.assertEqual(co.fiscal_period_end, datetime.date(2012, 12, 31))
self.assertEqual(co.claim_count, '1')
self.assertEqual(co.total_claim_charge, Decimal('200.02'))
self.assertEqual(co.total_covered_charge, Decimal('200.02'))
co = self.f.facades[1].claims_overview
self.assertEqual(co.number, '1')
self.assertEqual(co.provider_id, '1333333333')
self.assertEqual(co.facility_type_code, '81')
self.assertEqual(co.fiscal_period_end, datetime.date(2012, 12, 31))
self.assertEqual(co.claim_count, '1')
self.assertEqual(co.total_claim_charge, Decimal('23276.56'))
self.assertEqual(co.total_covered_charge, Decimal('12000.65'))
## Claims ##
def test_claims(self):
claims = self.f.facades[0].claims
c = claims[0]
# Claim details
pi = c.payment_info
self.assertEqual(pi.patient_control_number, "001-DDDDDDDDDD")
self.assertEqual(pi.status_code, ('1', 'Processed as Primary'))
self.assertEqual(pi.total_charge, Decimal('200.02'))
self.assertEqual(pi.payment, Decimal('200.02'))
self.assertEqual(pi.patient_responsibility, Decimal('0.0'))
self.assertEqual(pi.claim_type, ('13', 'Point of Service (POS)'))
self.assertEqual(pi.payer_claim_control_number,
'1234567890 0987654321')
self.assertEqual(pi.facility_type, '81')
self.assertEqual(pi.total_covered_charge, Decimal('200.02'))
# Patient
patient = c.patient
self.assertTrue(patient.is_person())
self.assertFalse(patient.is_organization())
self.assertEqual(patient.last_name, "DOE")
self.assertEqual(patient.first_name, "JOHN")
self.assertEqual(patient.middle_initial, "")
self.assertEqual(patient.suffix, "")
# Insured
insured = c.insured
self.assertTrue(insured.is_person())
self.assertFalse(insured.is_organization())
self.assertEqual(insured.last_name, "DOE")
self.assertEqual(insured.first_name, "JANE")
self.assertEqual(insured.middle_initial, "")
self.assertEqual(insured.suffix, "")
self.assertEqual(insured.id_code_qual,
("MI", "Member Identification Number"))
self.assertEqual(insured.id_code, "123111111")
# Corrected Info
corrected = c.corrected_insured
self.assertEqual(corrected.last_name, "DOE")
self.assertEqual(corrected.first_name, "JANE")
self.assertEqual(corrected.middle_initial, "D")
# Service Provider
provider = c.service_provider
self.assertFalse(provider.is_person())
self.assertTrue(provider.is_organization())
self.assertEqual(provider.org_name, "MY CLINIC")
self.assertEqual(provider.id_code_qual, (
("XX", "Health Care Financing Administration National "\
"Provider Identifier")))
self.assertEqual(provider.id_code, '1333333333')
# Other stuff
self.assertEqual(c.group_or_policy_number, '5G5G5G')
self.assertEqual(c.contract_class, 'CHOYC+')
# If a received date isn't supplied, don't die, just return None!
self.assertEqual(c.date_received, None)
self.assertEqual(c.date_statement_period_start,
datetime.date(2012, 02, 22))
self.assertEqual(c.claim_adjustments.patient_responsibility.amount_1,
Decimal('0.0'))
self.assertEqual(c.claim_adjustments.contractual_obligation.amount_1,
Decimal('0.0'))
# Line item charges
l = c.line_items[0]
self.assertEqual(l.hcpcs_code[0], '88888')
self.assertEqual(l.charge, Decimal('200.02'))
self.assertEqual(l.payment, Decimal('200.02'))
self.assertEqual(l.quantity, '1')
self.assertEqual(l.service_date, datetime.date(2012, 02, 22))
self.assertEqual(l.provider_control_number, '251111111111')
self.assertEqual(l.allowed_amount, Decimal('200.02'))
self.assertEqual(l.claim_adjustments.patient_responsibility.amount_1,
Decimal('0.0'))
self.assertEqual(l.claim_adjustments.contractual_obligation.amount_1,
Decimal('0.0'))
# Second claim!
claims = self.f.facades[1].claims
c = claims[0]
# Claim details
pi = c.payment_info
self.assertEqual(pi.patient_control_number, "001-SSSSSSSSST")
self.assertEqual(pi.status_code, ('1', 'Processed as Primary'))
self.assertEqual(pi.total_charge, Decimal('23276.56'))
self.assertEqual(pi.payment, Decimal('12000.65'))
self.assertEqual(pi.patient_responsibility, Decimal('145.0'))
self.assertEqual(pi.claim_type,
("14", "Exclusive Provider Organization (EPO)"))
self.assertEqual(pi.payer_claim_control_number,
'2234567891 1987654322')
self.assertEqual(pi.facility_type, '81')
self.assertEqual(pi.total_covered_charge, Decimal('12145.65'))
# Patient
patient = c.patient
self.assertTrue(patient.is_person())
self.assertFalse(patient.is_organization())
self.assertEqual(patient.last_name, "SMITH")
self.assertEqual(patient.first_name, "JOHN")
# Insured
insured = c.insured
self.assertTrue(insured.is_person())
self.assertFalse(insured.is_organization())
self.assertEqual(insured.last_name, "SMITH")
self.assertEqual(insured.first_name, "JANE")
self.assertEqual(insured.middle_initial, "")
self.assertEqual(insured.suffix, "")
self.assertEqual(insured.id_code_qual,
("MI", "Member Identification Number"))
self.assertEqual(insured.id_code, "123222222")
# Corrected Info
corrected = c.corrected_insured
self.assertEqual(corrected.last_name, "SMITH")
self.assertEqual(corrected.first_name, "JANE")
self.assertEqual(corrected.middle_initial, "A")
# Other stuff
self.assertEqual(c.group_or_policy_number, '717171')
self.assertEqual(c.contract_class, 'CHOYC')
self.assertEqual(c.date_received, datetime.date(2012, 03, 03))
self.assertEqual(c.date_statement_period_start,
datetime.date(2012, 02, 23))
self.assertEqual(c.claim_adjustments.patient_responsibility.amount_1,
Decimal('145.0'))
# Don't look into the child loops for segments
self.assertEqual(c.claim_adjustments.contractual_obligation.amount_1,
Decimal('0.0'))
# Line item charges
l = c.line_items[0]
self.assertEqual(l.hcpcs_code[0], '88888')
self.assertEqual(l.charge, Decimal('23276.56'))
self.assertEqual(l.payment, Decimal('12145.65'))
self.assertEqual(l.quantity, '1')
self.assertEqual(l.service_date, datetime.date(2012, 02, 21))
self.assertEqual(l.provider_control_number, '252222222222')
self.assertEqual(l.claim_adjustments.contractual_obligation.amount_1,
Decimal('11130.91'))
self.assertEqual(
l.claim_adjustments.contractual_obligation.total_amount(),
Decimal('11130.91'))
self.assertEqual(l.claim_adjustments.patient_responsibility.amount_1,
Decimal('0.0'))
self.assertEqual(l.allowed_amount, Decimal('12145.65'))
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stderr,
level=logging.INFO,
)
unittest.main()
|
|
#
# Info.py -- FITS Info plugin for the Ginga fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import gtk
import numpy
from ginga.gtkw import GtkHelp
from ginga.misc import Bunch
from ginga import GingaPlugin
class Info(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Info, self).__init__(fv)
self.channel = {}
self.active = None
self.info = None
#self.w = Bunch.Bunch()
self.w.tooltips = self.fv.w.tooltips
fv.set_callback('add-channel', self.add_channel)
fv.set_callback('delete-channel', self.delete_channel)
fv.set_callback('field-info', self.field_info)
fv.set_callback('active-image', self.focus_cb)
def build_gui(self, container):
nb = gtk.Notebook()
nb.set_group_id(-30)
nb.set_tab_pos(gtk.POS_BOTTOM)
nb.set_scrollable(False)
nb.set_show_tabs(False)
nb.set_show_border(False)
nb.show()
self.nb = nb
container.pack_start(self.nb, fill=True, expand=True)
def _create_info_window(self):
sw = gtk.ScrolledWindow()
sw.set_border_width(2)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
vbox = gtk.VBox()
captions = (('Name', 'label'), ('Object', 'label'),
('X', 'label'), ('Y', 'label'), ('Value', 'label'),
('RA', 'label'), ('DEC', 'label'),
('Equinox', 'label'), ('Dimensions', 'label'),
#('Slices', 'label', 'MultiDim', 'button'),
('Min', 'label'), ('Max', 'label'),
('Zoom', 'label'),
('Cut Low', 'xlabel', '@Cut Low', 'entry'),
('Cut High', 'xlabel', '@Cut High', 'entry'),
('Auto Levels', 'button', 'Cut Levels', 'button'),
('Cut New', 'label'), ('Zoom New', 'label'),
('Preferences', 'button'),
)
w, b = GtkHelp.build_info(captions)
self.w.tooltips.set_tip(b.cut_levels, "Set cut levels manually")
self.w.tooltips.set_tip(b.auto_levels, "Set cut levels by algorithm")
self.w.tooltips.set_tip(b.cut_low, "Set low cut level (press Enter)")
self.w.tooltips.set_tip(b.cut_high, "Set high cut level (press Enter)")
self.w.tooltips.set_tip(b.preferences, "Set preferences for this channel")
#self.w.tooltips.set_tip(b.multidim, "View other HDUs or slices")
vbox.pack_start(w, padding=0, fill=True, expand=True)
# Convenience navigation buttons
btns = gtk.HButtonBox()
btns.set_layout(gtk.BUTTONBOX_CENTER)
btns.set_spacing(3)
btns.set_child_size(15, -1)
bw = Bunch.Bunch()
for tup in (
#("Load", 'button', 'fits_open_48', "Open an image file"),
("Prev", 'button', 'prev_48', "Go to previous image"),
("Next", 'button', 'next_48', "Go to next image"),
("Zoom In", 'button', 'zoom_in_48', "Zoom in"),
("Zoom Out", 'button', 'zoom_out_48', "Zoom out"),
("Zoom Fit", 'button', 'zoom_fit_48', "Zoom to fit window size"),
("Zoom 1:1", 'button', 'zoom_100_48', "Zoom to 100% (1:1)"),
#("Quit", 'button', 'exit_48', "Quit the program"),
):
btn = self.fv.make_button(*tup)
name = tup[0]
if tup[3]:
self.w.tooltips.set_tip(btn, tup[3])
bw[GtkHelp._name_mangle(name, pfx='btn_')] = btn
btns.pack_end(btn, padding=4)
#self.w.btn_load.connect("clicked", lambda w: self.gui_load_file())
bw.btn_prev.connect("clicked", lambda w: self.fv.prev_img())
bw.btn_next.connect("clicked", lambda w: self.fv.next_img())
bw.btn_zoom_in.connect("clicked", lambda w: self.fv.zoom_in())
bw.btn_zoom_out.connect("clicked", lambda w: self.fv.zoom_out())
bw.btn_zoom_fit.connect("clicked", lambda w: self.fv.zoom_fit())
bw.btn_zoom_1_1.connect("clicked", lambda w: self.fv.zoom_1_to_1())
vbox.pack_start(btns, padding=4, fill=True, expand=False)
vbox.show_all()
sw.add_with_viewport(vbox)
sw.set_size_request(-1, 420)
sw.show_all()
return sw, b
def add_channel(self, viewer, chinfo):
sw, winfo = self._create_info_window()
chname = chinfo.name
self.nb.append_page(sw, gtk.Label(chname))
index = self.nb.page_num(sw)
info = Bunch.Bunch(widget=sw, winfo=winfo,
nbindex=index)
self.channel[chname] = info
winfo.cut_low.connect('activate', self.cut_levels,
chinfo.fitsimage, info)
winfo.cut_high.connect('activate', self.cut_levels,
chinfo.fitsimage, info)
winfo.cut_levels.connect('clicked', self.cut_levels,
chinfo.fitsimage, info)
winfo.auto_levels.connect('clicked', self.auto_levels,
chinfo.fitsimage, info)
winfo.preferences.connect('clicked', self.preferences,
chinfo)
fitsimage = chinfo.fitsimage
fitsimage.set_callback('image-set', self.new_image_cb, info)
fitsimage.set_callback('cut-set', self.cutset_cb, info)
fitsimage.set_callback('zoom-set', self.zoomset_cb, info)
fitsimage.set_callback('autocuts', self.autocuts_cb, info)
fitsimage.set_callback('autozoom', self.autozoom_cb, info)
def delete_channel(self, viewer, chinfo):
self.logger.debug("TODO: delete channel %s" % (chinfo.name))
# CALLBACKS
def new_image_cb(self, fitsimage, image, info):
self.set_info(info, fitsimage)
return True
def focus_cb(self, viewer, fitsimage):
chname = self.fv.get_channelName(fitsimage)
chinfo = self.fv.get_channelInfo(chname)
chname = chinfo.name
print "info focus cb: chname=%s" % (chname)
if self.active != chname:
index = self.channel[chname].nbindex
self.nb.set_current_page(index)
self.active = chname
self.info = self.channel[self.active]
print "Switched page to %d" % (index)
self.set_info(self.info, fitsimage)
return True
def zoomset_cb(self, fitsimage, zoomlevel, scale_x, scale_y, info):
"""This callback is called when the main window is zoomed.
"""
# Set text showing zoom factor (1X, 2X, etc.)
if scale_x == scale_y:
text = self.fv.scale2text(scale_x)
else:
textx = self.fv.scale2text(scale_x)
texty = self.fv.scale2text(scale_y)
text = "X: %s Y: %s" % (textx, texty)
info.winfo.zoom.set_text(text)
def cutset_cb(self, fitsimage, loval, hival, info):
#info.winfo.cut_low.set_text('%.2f' % (loval))
info.winfo.lbl_cut_low.set_text('%.2f' % (loval))
#info.winfo.cut_high.set_text('%.2f' % (hival))
info.winfo.lbl_cut_high.set_text('%.2f' % (hival))
def autocuts_cb(self, fitsimage, option, info):
info.winfo.cut_new.set_text(option)
def autozoom_cb(self, fitsimage, option, info):
info.winfo.zoom_new.set_text(option)
# LOGIC
def preferences(self, w, chinfo):
self.fv.start_operation('Preferences')
return True
def set_info(self, info, fitsimage):
image = fitsimage.get_image()
header = image.get_header()
# Update info panel
name = image.get('name', 'Noname')
info.winfo.name.set_text(name)
objtext = header.get('OBJECT', 'UNKNOWN')
info.winfo.object.set_text(objtext)
equinox = header.get('EQUINOX', '')
info.winfo.equinox.set_text(str(equinox))
# Show min, max values
width, height = fitsimage.get_data_size()
minval, maxval = image.get_minmax(noinf=False)
info.winfo.max.set_text(str(maxval))
info.winfo.min.set_text(str(minval))
# Show cut levels
loval, hival = fitsimage.get_cut_levels()
#info.winfo.cut_low.set_text('%.2f' % (loval))
info.winfo.lbl_cut_low.set_text('%.2f' % (loval))
#info.winfo.cut_high.set_text('%.2f' % (hival))
info.winfo.lbl_cut_high.set_text('%.2f' % (hival))
# Show dimensions
dim_txt = "%dx%d" % (width, height)
info.winfo.dimensions.set_text(dim_txt)
# update zoom indicator
scalefactor = fitsimage.get_scale()
text = self.fv.scale2text(scalefactor)
info.winfo.zoom.set_text(text)
# update cut new/zoom new indicators
t_ = fitsimage.get_settings()
info.winfo.cut_new.set_text(t_['autocuts'])
info.winfo.zoom_new.set_text(t_['autozoom'])
def field_info(self, viewer, fitsimage, info):
# TODO: can this be made more efficient?
chname = self.fv.get_channelName(fitsimage)
chinfo = self.fv.get_channelInfo(chname)
chname = chinfo.name
obj = self.channel[chname]
obj.winfo.x.set_text("%.3f" % info.x)
obj.winfo.y.set_text("%.3f" % info.y)
obj.winfo.value.set_text(str(info.value))
obj.winfo.ra.set_text(info.ra_txt)
obj.winfo.dec.set_text(info.dec_txt)
def cut_levels(self, w, fitsimage, info):
try:
loval = float(info.winfo.cut_low.get_text())
hival = float(info.winfo.cut_high.get_text())
return fitsimage.cut_levels(loval, hival)
except Exception, e:
self.fv.showStatus("Error cutting levels: %s" % (str(e)))
return True
def auto_levels(self, w, fitsimage, info):
fitsimage.auto_levels()
def __str__(self):
return 'info'
#END
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import unicode_literals
import os
from .utils import TestCase, UTILS_PATH, add_metaclass
from .. import presets
from ..presets import Preset, PresetParser
try:
# Python 2
import ConfigParser as configparser
except ImportError:
import configparser
PRESET_FILES = [
os.path.join(UTILS_PATH, 'build-presets.ini'),
]
PRESET_DEFAULTS = {
'darwin_toolchain_alias': 'Alias',
'darwin_toolchain_bundle_identifier': 'BundleIdentifier',
'darwin_toolchain_display_name': 'DisplayName',
'darwin_toolchain_display_name_short': 'DisplayNameShort',
'darwin_toolchain_version': '1.0',
'darwin_toolchain_xctoolchain_name': 'default',
'extra_swift_args': '',
'install_destdir': '/tmp/install',
'install_symroot': '/tmp/install/symroot',
'install_toolchain_dir': '/tmp/install/toolchain',
'installable_package': '/tmp/install/pkg',
'swift_install_destdir': '/tmp/install/swift',
'symbols_package': '/path/to/symbols/package',
'ndk_path': '/path/to/ndk',
'arm_dir': '/path/to/arm',
}
SAMPLE_PRESET = """
[preset: sample]
# This is a comment
ios
tvos
watchos
test
validation-test
lit-args=-v
compiler-vendor=apple
# The '--' argument is now unnecessary
dash-dash
verbose-build
build-ninja
# Default interpolation
install-symroot=%(install_symroot)s
"""
IGNORED_SECTION = """
[section_name]
random-options=1
"""
MIXIN_ORDER_PRESETS = """
[preset: test_mixin]
first-opt=0
second-opt=1
[preset: test]
first-opt=1
mixin-preset=test_mixin
second-opt=2
"""
INTERPOLATED_PRESET = """
[preset: test]
install-symroot=%(install_symroot)s
"""
DUPLICATE_PRESET_NAMES = """
[preset: test]
ios
[preset: test]
tvos
"""
DUPLICATE_PRESET_OPTIONS = """
[preset: test]
ios
ios
"""
# -----------------------------------------------------------------------------
class TestPreset(TestCase):
def test_format_args(self):
preset = Preset('sample', [('--ios', None), ('--test', '1')])
self.assertEqual(preset.format_args(), ['--ios', '--test=1'])
# -----------------------------------------------------------------------------
class TestPresetParserMeta(type):
"""Metaclass used to dynamically generate test methods to validate all of
the available presets.
"""
def __new__(cls, name, bases, attrs):
preset_parser = PresetParser()
preset_parser.read(PRESET_FILES)
# Generate tests for each preset
for preset_name in preset_parser.preset_names():
test_name = 'test_get_preset_' + preset_name
attrs[test_name] = cls.generate_get_preset_test(
preset_parser, preset_name)
return super(TestPresetParserMeta, cls).__new__(
cls, name, bases, attrs)
@classmethod
def generate_get_preset_test(cls, preset_parser, preset_name):
def test(self):
preset_parser.get_preset(preset_name, vars=PRESET_DEFAULTS)
return test
@add_metaclass(TestPresetParserMeta)
class TestPresetParser(TestCase):
def test_read(self):
parser = PresetParser()
parser.read(PRESET_FILES)
def test_read_invalid_files(self):
parser = PresetParser()
with self.assertRaises(presets.UnparsedFilesError) as cm:
parser.read(['nonsense-presets.ini'])
e = cm.exception
self.assertListEqual(e.filenames, ['nonsense-presets.ini'])
def test_read_file(self):
parser = PresetParser()
with self.assertNotRaises():
parser.read_file(PRESET_FILES[0])
def test_read_string(self):
parser = PresetParser()
parser.read_string(SAMPLE_PRESET)
preset = parser.get_preset('sample', vars={'install_symroot': '/tmp'})
self.assertIsNotNone(preset)
self.assertEqual(preset.name, 'sample')
self.assertListEqual(preset.args, [
('--ios', None),
('--tvos', None),
('--watchos', None),
('--test', None),
('--validation-test', None),
('--lit-args', '-v'),
('--compiler-vendor', 'apple'),
('--verbose-build', None),
('--build-ninja', None),
('--install-symroot', '/tmp')
])
def test_parser_ignores_non_preset_sections(self):
parser = PresetParser()
parser.read_string(IGNORED_SECTION)
self.assertEqual(len(parser._presets), 0)
def test_mixin_expansion_preserves_argument_order(self):
"""Mixins should be expanded in-place.
"""
parser = PresetParser()
parser.read_string(MIXIN_ORDER_PRESETS)
preset = parser.get_preset('test')
self.assertListEqual(preset.format_args(), [
'--first-opt=1',
# Mixin arguments
'--first-opt=0',
'--second-opt=1',
'--second-opt=2',
])
def test_interpolation_error(self):
parser = PresetParser()
parser.read_string(INTERPOLATED_PRESET)
with self.assertRaises(presets.InterpolationError) as cm:
parser.get_preset('test')
e = cm.exception
self.assertEqual(e.preset_name, 'test')
self.assertEqual(e.option, '--install-symroot')
self.assertEqual(e.rawval, '%(install_symroot)s')
self.assertEqual(e.reference, 'install_symroot')
def test_duplicate_option_error(self):
# Skip test if using the Python 2 ConfigParser module
if not hasattr(configparser, 'DuplicateOptionError'):
return
parser = PresetParser()
with self.assertRaises(presets.DuplicateOptionError) as cm:
parser.read_string(DUPLICATE_PRESET_OPTIONS)
e = cm.exception
self.assertEqual(e.preset_name, 'test')
self.assertEqual(e.option, 'ios')
def test_duplicate_preset_error(self):
# Skip test if using the Python 2 ConfigParser module
if not hasattr(configparser, 'DuplicateOptionError'):
return
parser = PresetParser()
with self.assertRaises(presets.DuplicatePresetError) as cm:
parser.read_string(DUPLICATE_PRESET_NAMES)
e = cm.exception
self.assertEqual(e.preset_name, 'test')
def test_get_preset_raw(self):
parser = PresetParser()
parser.read_string(INTERPOLATED_PRESET)
preset = parser.get_preset('test', raw=True)
self.assertEqual(preset.args, [
('--install-symroot', '%(install_symroot)s')
])
def test_get_missing_preset(self):
parser = PresetParser()
with self.assertRaises(presets.PresetNotFoundError) as cm:
parser.get_preset('test')
e = cm.exception
self.assertEqual(e.preset_name, 'test')
def test_preset_names(self):
parser = PresetParser()
parser.read_string('[preset: foo]')
parser.read_string('[preset: bar]')
parser.read_string('[preset: baz]')
self.assertEqual(set(parser.preset_names()),
set(['foo', 'bar', 'baz']))
|
|
import unittest
import dynetx as dn
class DynGraphTestCase(unittest.TestCase):
def test_coverage(self):
G = dn.DynGraph()
G.add_interaction(0, 1, t=0)
G.add_interaction(0, 2, t=0)
G.add_interaction(0, 1, t=1)
G.add_interaction(0, 2, t=2)
G.add_interaction(0, 3, t=2)
self.assertEqual(G.coverage(), 2/3)
self.assertEqual(G.node_contribution(1), 2/3)
self.assertEqual(G.edge_contribution(0, 1), 2/3)
self.assertEqual(G.edge_contribution(0, 3), 1/3)
def test_uniformity(self):
G = dn.DynGraph()
G.add_interaction(0, 1, t=0)
G.add_interaction(0, 2, t=0)
G.add_interaction(0, 1, t=1)
G.add_interaction(0, 2, t=1)
self.assertEqual(G.uniformity(), 1)
G.add_interaction(3, 4, t=1)
G.add_interaction(5, 6, t=1)
self.assertEqual(G.uniformity(), 2/3)
self.assertEqual(G.node_pair_uniformity(0, 1), 1)
self.assertEqual(G.node_pair_uniformity(0, 3), 0.5)
def test_density(self):
G = dn.DynGraph()
G.add_interaction(0, 1, t=0)
G.add_interaction(0, 2, t=0)
G.add_interaction(0, 1, t=1)
G.add_interaction(0, 2, t=1)
self.assertEqual(G.density(), 2/3)
self.assertEqual(G.pair_density(0, 1), 1)
G.add_interaction(1, 3, t=2)
self.assertEqual(G.pair_density(0, 3), 0)
G.add_interaction(0, 3, t=2)
self.assertEqual(G.pair_density(0, 1), 2/3)
self.assertAlmostEqual(G.node_density(0), 0.5555555555555556)
self.assertEqual(G.node_presence(0), set([0, 1, 2]))
def test_self_loop(self):
G = dn.DynGraph()
G.add_interaction(0, 1, t=0)
G.add_interaction(0, 2, t=0)
G.add_interaction(0, 0, t=0)
G.add_interaction(1, 1, t=0)
G.add_interaction(2, 2, t=0)
G.add_interaction(2, 2, t=2)
ints = G.interactions(t=0)
self.assertEqual(len(ints), 5)
self.assertEqual(G.has_interaction(0, 0, t=0), True)
def test_dyngraph_add_interaction(self):
g = dn.DynGraph()
self.assertIsInstance(g, dn.DynGraph)
g.add_interaction(1, 2, 2)
g.add_interaction(1, 2, 2, e=6)
g.add_interaction(1, 2, 7, e=11)
g.add_interaction(1, 2, 8, e=15)
g.add_interaction(1, 2, 18)
g.add_interaction(1, 2, 19)
its = g.interactions()
self.assertEqual(len(its), 1)
g.add_interactions_from([(1, 3), (1, 5)], t=2)
its = g.interactions()
self.assertEqual(len(its), 3)
its = g.interactions(t=18)
self.assertEqual(len(its), 1)
its = g.interactions(t=20)
self.assertEqual(len(its), 0)
self.assertEqual(len(list(g.neighbors_iter(1))), 3)
self.assertEqual(len(list(g.neighbors_iter(1, 7))), 1)
self.assertEqual(len(list(g.neighbors_iter(1, 0))), 0)
self.assertEqual(g.order(), len(g.nodes()))
self.assertEqual(g.has_node(42), False)
self.assertEqual(g.has_node(42, 3), False)
g.add_cycle([3, 4, 5, 6], t=34)
try:
g.time_slice(2, 1)
except:
pass
g.interactions_iter([1, 2])
try:
g.add_interaction(1, 5)
except:
pass
try:
g.add_interactions_from([(1, 4), (3, 6)])
except:
pass
try:
g.remove_edge(1, 2)
except:
pass
try:
g.remove_edges_from([(1, 2)])
except:
pass
try:
g.remove_node(1)
except:
pass
try:
g.remove_nodes_from([1, 2])
except:
pass
self.assertEqual(g.number_of_interactions(1, 90), 0)
def test_nodes(self):
g = dn.DynGraph()
g.add_star([0, 1, 2, 3, 4], t=5)
nds = len(g.nodes())
self.assertEqual(nds, 5)
g.add_star([5, 1, 2, 3, 4], t=6)
nds = len(g.nodes())
self.assertEqual(nds, 6)
nds = len(g.nodes(t=6))
self.assertEqual(nds, 5)
nds = len(g.nodes(t=9))
self.assertEqual(nds, 0)
self.assertEqual(g.has_node(0), True)
self.assertEqual(g.has_node(0, 5), True)
self.assertEqual(g.has_node(0, 6), False)
self.assertEqual(g.has_node(0, 0), False)
def test_number_of_interactions(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
its = g.number_of_interactions()
self.assertEqual(its, 8)
its = g.number_of_interactions(0)
self.assertEqual(its, None)
its = g.number_of_interactions(0, 1)
self.assertEqual(its, 1)
its = g.number_of_interactions(0, 1, 5)
self.assertEqual(its, 1)
its = g.number_of_interactions(0, 1, 6)
self.assertEqual(its, 0)
def test_has_interaction(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
self.assertEqual(g.has_interaction(0, 1), True)
self.assertEqual(g.has_interaction(0, 1, 5), True)
self.assertEqual(g.has_interaction(0, 1, 6), False)
self.assertEqual(g.has_interaction(0, 1, 9), False)
def test_neighbors(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
ng = len(g.neighbors(0))
self.assertEqual(ng, 1)
ng = len(g.neighbors(0, 5))
self.assertEqual(ng, 1)
ng = len(g.neighbors(0, 6))
self.assertEqual(ng, 0)
ng = len(g.neighbors(0, 0))
self.assertEqual(ng, 0)
def test_degree(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
ng = g.degree(4)
self.assertEqual(ng, 2)
ng = g.degree(4, 5)
self.assertEqual(ng, 1)
ng = g.degree(4, 6)
self.assertEqual(ng, 1)
ng = g.degree(4, 0)
self.assertEqual(ng, 0)
def test_number_of_nodes(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
nn = g.number_of_nodes()
self.assertEqual(nn, 9)
nn = g.number_of_nodes(t=5)
self.assertEqual(nn, 5)
nn = g.number_of_nodes(t=0)
self.assertEqual(nn, 0)
avg = g.avg_number_of_nodes()
self.assertEqual(avg, 5)
def test_update_node_attr(self):
g = dn.DynGraph()
for n in [0, 1, 2, 3, 4, 5, 6, 7, 8]:
g.add_node(n, Label="A")
for n in g.nodes():
g.update_node_attr(n, Label="B")
for n in g.nodes(data=True):
self.assertEqual(n[1]['Label'], "B")
g.update_node_attr_from([0, 1, 2], Label="C")
self.assertEqual(g._node[0]['Label'], "C")
def test_add_node_attr(self):
g = dn.DynGraph()
for n in [0, 1, 2, 3, 4, 5, 6, 7, 8]:
g.add_node(n, Label="A")
g.add_nodes_from([9, 10, 11, 12], Label="A")
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
for n in g.nodes(data=True):
self.assertEqual(n[1]['Label'], "A")
nds5 = []
for n in g.nodes(data=True, t=5):
nds5.append(n[0])
self.assertEqual(n[1]['Label'], "A")
self.assertListEqual(nds5, [0, 1, 2, 3, 4])
def test_time_slice_node_attr(self):
g = dn.DynGraph()
for n in [0, 1, 2, 3, 4, 5, 6, 7, 8]:
g.add_node(n, Label="A")
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
h = g.time_slice(5)
for n in h.nodes(data=True):
self.assertEqual(n[1]['Label'], "A")
self.assertIsInstance(h, dn.DynGraph)
self.assertEqual(h.number_of_nodes(), 5)
self.assertEqual(h.number_of_interactions(), 4)
def test_time_slice(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
h = g.time_slice(5)
self.assertIsInstance(h, dn.DynGraph)
self.assertEqual(h.number_of_nodes(), 5)
self.assertEqual(h.number_of_interactions(), 4)
h = g.time_slice(5, 5)
self.assertIsInstance(h, dn.DynGraph)
self.assertEqual(h.number_of_nodes(), 5)
self.assertEqual(h.number_of_interactions(), 4)
h = g.time_slice(5, 6)
self.assertIsInstance(h, dn.DynGraph)
self.assertEqual(h.number_of_nodes(), 9)
self.assertEqual(h.number_of_interactions(), 8)
h = g.time_slice(0)
self.assertIsInstance(h, dn.DynGraph)
self.assertEqual(h.number_of_nodes(), 0)
self.assertEqual(h.number_of_interactions(), 0)
def test_temporal_snapshots_ids(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
tsd = g.temporal_snapshots_ids()
self.assertEqual(tsd, [5, 6])
def test_interactions_per_snapshots(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=5)
g.add_path([4, 5, 6, 7, 8], t=6)
tsd = g.interactions_per_snapshots()
self.assertDictEqual(tsd, {5: 4, 6: 4})
tsd = g.interactions_per_snapshots(t=5)
self.assertEqual(tsd, 4)
tsd = g.interactions_per_snapshots(t=0)
self.assertEqual(tsd, 0)
def test_inter_event_time(self):
g = dn.DynGraph()
g.add_path([0, 1, 2, 3, 4], t=2)
g.add_path([4, 5, 6, 7, 8], t=3)
ivt = g.inter_event_time_distribution()
self.assertDictEqual(ivt, {0: 6, 1: 1})
ivt = g.inter_event_time_distribution(4)
self.assertDictEqual(ivt, {1: 1})
ivt = g.inter_event_time_distribution(0)
self.assertDictEqual(ivt, {})
ivt = g.inter_event_time_distribution(0, 1)
self.assertDictEqual(ivt, {})
def test_stream_interactions(self):
g = dn.DynGraph()
g.add_interaction(1, 2, 2)
g.add_interaction(1, 2, 2, e=6)
g.add_interaction(1, 2, 7, e=11)
g.add_interaction(1, 2, 8, e=15)
g.add_interaction(1, 2, 18)
g.add_interaction(1, 2, 19)
g.add_interactions_from([(1, 3), (1, 5)], t=2, e=3)
sres = list(g.stream_interactions())
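# stream_interactions() yields (u, v, op, t) tuples: '+' marks the snapshot where an interaction appears, '-' the one where it vanishes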
cres = [(1, 2, '+', 2), (1, 3, '+', 2), (1, 5, '+', 2), (1, 3, '-', 3),
(1, 5, '-', 3), (1, 2, '-', 6), (1, 2, '+', 7), (1, 2, '-', 15), (1, 2, '+', 18)]
self.assertEqual(sorted(sres), sorted(cres))
def test_accumulative_growth(self):
g = dn.DynGraph(edge_removal=False)
g.add_interaction(1, 2, 2)
g.add_interaction(1, 2, 2, e=6)
g.add_interaction(1, 2, 7, e=11)
g.add_interaction(1, 2, 8, e=15)
g.add_interaction(1, 2, 18)
g.add_interaction(1, 2, 19)
g.add_interactions_from([(1, 3), (1, 5)], t=2, e=3)
sres = list(g.stream_interactions())
cres = [(1, 2, '+', 2), (1, 5, '+', 2), (1, 3, '+', 2)]
self.assertEqual(sorted(sres), sorted(cres))
self.assertEqual(g.has_interaction(1, 2, 18), True)
self.assertEqual(g.has_interaction(1, 2, 40), False)
try:
g.add_interaction(2, 1, 7)
except:
pass
def test_conversion(self):
G = dn.DynGraph()
G.add_interaction(0, 1, t=0)
G.add_interaction(0, 2, t=0)
G.add_interaction(0, 0, t=0)
G.add_interaction(1, 1, t=0)
G.add_interaction(2, 2, t=0)
G.add_interaction(2, 2, t=2)
H = G.to_directed()
self.assertIsInstance(H, dn.DynDiGraph)
self.assertEqual(H.number_of_nodes(), 3)
self.assertEqual(H.number_of_edges(), 5)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LW
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class RouterInfo(object):
def __init__(self,
router_id,
router,
agent_conf,
interface_driver,
use_ipv6=False,
ns_name=None):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = set()
# Invoke the setter for establishing initial SNAT action
self.router = router
self.ns_name = ns_name
self.iptables_manager = iptables_manager.IptablesManager(
use_ipv6=use_ipv6,
namespace=self.ns_name)
self.routes = []
self.agent_conf = agent_conf
self.driver = interface_driver
# radvd is a neutron.agent.linux.ra.DaemonMonitor
self.radvd = None
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
@property
def is_ha(self):
# TODO(Carl) Refactoring should render this obsolete. Remove it.
return False
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self, self._router.get('gw_port'),
*args, action=self._snat_action)
self._snat_action = None
def _update_routing_table(self, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self):
new_routes = self.router['routes']
old_routes = self.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# drop a replaced route from the list of routes to delete
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
# 'replace' succeeds even if there is no existing route
self._update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.routes = new_routes
def get_ex_gw_port(self):
return self.router.get('gw_port')
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
return self.router.get(l3_constants.FLOATINGIP_KEY, [])
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
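# Illustrative example (addresses are hypothetical): for floating IP
# 203.0.113.5 mapped to fixed IP 10.0.0.5 the rules above expand to
#   PREROUTING: -d 203.0.113.5 -j DNAT --to 10.0.0.5
#   OUTPUT: -d 203.0.113.5 -j DNAT --to 10.0.0.5
#   float-snat: -s 10.0.0.5 -j SNAT --to 203.0.113.5
# and are installed into the router's iptables 'nat' table by
# process_floating_ip_nat_rules() below.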
def process_floating_ip_nat_rules(self):
"""Configure NAT rules for the router's floating IPs.
Configures iptables rules for the floating ips of the given router
"""
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
self.iptables_manager.apply()
def process_snat_dnat_for_fip(self):
try:
self.process_floating_ip_nat_rules()
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException(
'L3 agent failure to setup NAT for floating IPs')
def _add_fip_addr_to_device(self, fip, device):
"""Configures the floating ip address on the device.
"""
try:
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
net = netaddr.IPNetwork(ip_cidr)
device.addr.add(net.version, ip_cidr, str(net.broadcast))
return True
except RuntimeError:
# any exception occurred here should cause the floating IP
# to be set in error state
LOG.warn(_LW("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
def add_floating_ip(self, fip, interface_name, device):
raise NotImplementedError()
def remove_floating_ip(self, device, ip_cidr):
net = netaddr.IPNetwork(ip_cidr)
device.addr.delete(net.version, ip_cidr)
self.driver.delete_conntrack_state(namespace=self.ns_name, ip=ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
def process_floating_ip_addresses(self, interface_name):
"""Configure IP addresses on router's external gateway interface.
Ensures addresses for existing floating IPs and cleans up
those that should no longer be configured.
"""
fip_statuses = {}
if interface_name is None:
LOG.debug('No Interface for floating IPs router: %s',
self.router['id'])
return fip_statuses
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
existing_cidrs = self.get_router_cidrs(device)
new_cidrs = set()
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
new_cidrs.add(ip_cidr)
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
if ip_cidr not in existing_cidrs:
fip_statuses[fip['id']] = self.add_floating_ip(
fip, interface_name, device)
fips_to_remove = (
ip_cidr for ip_cidr in existing_cidrs - new_cidrs
if common_utils.is_cidr_host(ip_cidr))
for ip_cidr in fips_to_remove:
self.remove_floating_ip(device, ip_cidr)
return fip_statuses
def configure_fip_addresses(self, interface_name):
try:
return self.process_floating_ip_addresses(interface_name)
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
'floating IPs')
def put_fips_in_error_state(self):
fip_statuses = {}
for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
return fip_statuses
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from stackalytics.processor import record_processor
from stackalytics.processor import runtime_storage
from stackalytics.processor import utils
LP_URI = 'https://api.launchpad.net/1.0/people/?ws.op=getByEmail&email=%s'
def _make_users(users):
users_index = {}
for user in users:
if 'user_id' in user:
users_index[user['user_id']] = user
if 'launchpad_id' in user:
users_index[user['launchpad_id']] = user
for email in user['emails']:
users_index[email] = user
return users_index
def _make_companies(companies):
domains_index = {}
for company in companies:
for domain in company['domains']:
domains_index[domain] = company['company_name']
return domains_index
class TestRecordProcessor(testtools.TestCase):
def setUp(self):
super(TestRecordProcessor, self).setUp()
companies = [
{
'company_name': 'SuperCompany',
'domains': ['super.com', 'super.no']
},
{
"domains": ["nec.com", "nec.co.jp"],
"company_name": "NEC"
},
{
'company_name': '*independent',
'domains': ['']
},
]
self.user = {
'user_id': 'john_doe',
'launchpad_id': 'john_doe',
'user_name': 'John Doe',
'emails': ['[email protected]', '[email protected]'],
'companies': [
{'company_name': '*independent',
'end_date': 1234567890},
{'company_name': 'SuperCompany',
'end_date': 0},
]
}
self.get_users = mock.Mock(return_value=[
self.user,
])
releases = [
{
'release_name': 'prehistory',
'end_date': utils.date_to_timestamp('2011-Apr-21')
},
{
'release_name': 'Diablo',
'end_date': utils.date_to_timestamp('2011-Sep-08')
},
{
'release_name': 'Zoo',
'end_date': utils.date_to_timestamp('2035-Sep-08')
},
]
def get_by_key(table):
if table == 'companies':
return _make_companies(companies)
elif table == 'users':
return _make_users(self.get_users())
elif table == 'releases':
return releases
else:
raise Exception('Wrong table %s' % table)
p_storage = mock.Mock(runtime_storage.RuntimeStorage)
p_storage.get_by_key = mock.Mock(side_effect=get_by_key)
self.runtime_storage = p_storage
self.commit_processor = record_processor.RecordProcessor(p_storage)
self.read_json_from_uri_patch = mock.patch(
'stackalytics.processor.utils.read_json_from_uri')
self.read_json = self.read_json_from_uri_patch.start()
def tearDown(self):
super(TestRecordProcessor, self).tearDown()
self.read_json_from_uri_patch.stop()
def _generate_commits(self, email='[email protected]', date=1999999999):
yield {
'record_type': 'commit',
'commit_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1',
'author_name': 'John Doe',
'author_email': email,
'date': date,
'lines_added': 25,
'lines_deleted': 9,
'release_name': 'havana',
}
def test_get_company_by_email_mapped(self):
email = '[email protected]'
res = self.commit_processor._get_company_by_email(email)
self.assertEquals('SuperCompany', res)
def test_get_company_by_email_with_long_suffix_mapped(self):
email = '[email protected]'
res = self.commit_processor._get_company_by_email(email)
self.assertEquals('NEC', res)
def test_get_company_by_email_with_long_suffix_mapped_2(self):
email = '[email protected]'
res = self.commit_processor._get_company_by_email(email)
self.assertEquals('NEC', res)
def test_get_company_by_email_not_mapped(self):
email = '[email protected]'
res = self.commit_processor._get_company_by_email(email)
self.assertEquals(None, res)
def test_update_commit_existing_user(self):
commit_generator = self._generate_commits()
commit = list(self.commit_processor.process(commit_generator))[0]
self.assertEquals('SuperCompany', commit['company_name'])
self.assertEquals('john_doe', commit['launchpad_id'])
def test_update_commit_existing_user_old_job(self):
commit_generator = self._generate_commits(date=1000000000)
commit = list(self.commit_processor.process(commit_generator))[0]
self.assertEquals('*independent', commit['company_name'])
self.assertEquals('john_doe', commit['launchpad_id'])
def test_update_commit_existing_user_new_email_known_company(self):
"""
User is known to LP, but the email is new to us and maps to another company.
Should return that company instead of the ones recorded in the user db.
"""
email = '[email protected]'
commit_generator = self._generate_commits(email=email)
launchpad_id = 'john_doe'
self.read_json.return_value = {'name': launchpad_id,
'display_name': launchpad_id}
user = self.user.copy()
# tell storage to return existing user
self.get_users.return_value = [user]
commit = list(self.commit_processor.process(commit_generator))[0]
self.runtime_storage.set_by_key.assert_called_with('users', mock.ANY)
self.read_json.assert_called_once_with(LP_URI % email)
self.assertIn(email, user['emails'])
self.assertEquals('NEC', commit['company_name'])
self.assertEquals(launchpad_id, commit['launchpad_id'])
def test_update_commit_existing_user_new_email_unknown_company(self):
"""
User is known to LP, but his email is new to us. Should match
the user and return the current company.
"""
email = '[email protected]'
commit_generator = self._generate_commits(email=email)
launchpad_id = 'john_doe'
self.read_json.return_value = {'name': launchpad_id,
'display_name': launchpad_id}
user = self.user.copy()
# tell storage to return existing user
self.get_users.return_value = [user]
commit = list(self.commit_processor.process(commit_generator))[0]
self.runtime_storage.set_by_key.assert_called_with('users', mock.ANY)
self.read_json.assert_called_once_with(LP_URI % email)
self.assertIn(email, user['emails'])
self.assertEquals('SuperCompany', commit['company_name'])
self.assertEquals(launchpad_id, commit['launchpad_id'])
def test_update_commit_new_user(self):
"""
User is known to LP, but new to us.
Should add a new user and set the company depending on the email.
"""
email = '[email protected]'
commit_generator = self._generate_commits(email=email)
launchpad_id = 'smith'
self.read_json.return_value = {'name': launchpad_id,
'display_name': 'Smith'}
self.get_users.return_value = []
commit = list(self.commit_processor.process(commit_generator))[0]
self.read_json.assert_called_once_with(LP_URI % email)
self.assertEquals('NEC', commit['company_name'])
self.assertEquals(launchpad_id, commit['launchpad_id'])
def test_update_commit_new_user_unknown_to_lb(self):
"""
User is new to us and not known to LP.
Should set the user name and leave the launchpad id empty.
"""
email = '[email protected]'
commit_generator = self._generate_commits(email=email)
self.read_json.return_value = None
self.get_users.return_value = []
commit = list(self.commit_processor.process(commit_generator))[0]
self.read_json.assert_called_once_with(LP_URI % email)
self.assertEquals('*independent', commit['company_name'])
self.assertEquals(None, commit['launchpad_id'])
def test_update_commit_invalid_email(self):
"""
User's email is malformed
"""
email = 'error.root'
commit_generator = self._generate_commits(email=email)
self.read_json.return_value = None
self.get_users.return_value = []
commit = list(self.commit_processor.process(commit_generator))[0]
self.assertEquals(0, self.read_json.called)
self.assertEquals('*independent', commit['company_name'])
self.assertEquals(None, commit['launchpad_id'])
def _generate_record_commit(self):
yield {'commit_id': u'0afdc64bfd041b03943ceda7849c4443940b6053',
'lines_added': 9,
'module': u'stackalytics',
'record_type': 'commit',
'message': u'Closes bug 1212953\n\nChange-Id: '
u'I33f0f37b6460dc494abf2520dc109c9893ace9e6\n',
'subject': u'Fixed affiliation of Edgar and Sumit',
'loc': 10,
'user_id': u'john_doe',
'primary_key': u'0afdc64bfd041b03943ceda7849c4443940b6053',
'author_email': u'[email protected]',
'company_name': u'SuperCompany',
'record_id': 6,
'lines_deleted': 1,
'week': 2275,
'blueprint_id': None,
'bug_id': u'1212953',
'files_changed': 1,
'author_name': u'John Doe',
'date': 1376737923,
'launchpad_id': u'john_doe',
'branches': set([u'master']),
'change_id': u'I33f0f37b6460dc494abf2520dc109c9893ace9e6',
'release': u'havana'}
def test_update_record_no_changes(self):
commit_generator = self._generate_record_commit()
release_index = {'0afdc64bfd041b03943ceda7849c4443940b6053': 'havana'}
updated = list(self.commit_processor.update(commit_generator,
release_index))
self.assertEquals(0, len(updated))
|
|
#
# astro.py -- classes for special astronomy shapes drawn on
# ginga canvases.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import numpy
from ginga.canvas.CanvasObject import (CanvasObjectBase, _bool, _color,
Point, MovePoint, ScalePoint,
RotatePoint,
register_canvas_types, get_canvas_type,
colors_plus_none)
from ginga.misc.ParamSet import Param
from ginga.misc.Bunch import Bunch
from ginga.util import wcs
from .mixins import OnePointMixin, TwoPointMixin, OnePointOneRadiusMixin
from .layer import CompoundObject
class Ruler(TwoPointMixin, CanvasObjectBase):
"""Draws a WCS ruler (like a right triangle) on a DrawingCanvas.
Parameters are:
x1, y1: 0-based coordinates of one end of the diagonal in the data space
x2, y2: 0-based coordinates of the opposite end of the diagonal
Optional parameters for linesize, color, etc.
"""
@classmethod
def get_params_metadata(cls):
return [
## Param(name='coord', type=str, default='data',
## valid=['data', 'wcs'],
## description="Set type of coordinates"),
Param(name='x1', type=float, default=0.0, argpos=0,
description="First X coordinate of object"),
Param(name='y1', type=float, default=0.0, argpos=1,
description="First Y coordinate of object"),
Param(name='x2', type=float, default=0.0, argpos=2,
description="Second X coordinate of object"),
Param(name='y2', type=float, default=0.0, argpos=3,
description="Second Y coordinate of object"),
Param(name='linewidth', type=int, default=1,
min=1, max=20, widget='spinbutton', incr=1,
description="Width of outline"),
Param(name='linestyle', type=str, default='solid',
valid=['solid', 'dash'],
description="Style of outline (default: solid)"),
Param(name='color',
valid=colors_plus_none, type=_color, default='lightgreen',
description="Color of outline"),
Param(name='showplumb', type=_bool,
default=True, valid=[False, True],
description="Show plumb lines for the ruler"),
Param(name='color2',
valid=colors_plus_none, type=_color, default='yellow',
description="Second color of outline"),
Param(name='alpha', type=float, default=1.0,
min=0.0, max=1.0, widget='spinfloat', incr=0.05,
description="Opacity of outline"),
Param(name='units', type=str, default='arcmin',
valid=['arcmin', 'degrees', 'pixels'],
description="Units for text distance (default: arcmin)"),
Param(name='font', type=str, default='Sans Serif',
description="Font family for text"),
Param(name='fontsize', type=int, default=None,
min=8, max=72,
description="Font size of text (default: vary by scale)"),
Param(name='showcap', type=_bool,
default=False, valid=[False, True],
description="Show caps for this object"),
]
@classmethod
def idraw(cls, canvas, cxt):
return cls(cxt.start_x, cxt.start_y, cxt.x, cxt.y, **cxt.drawparams)
def __init__(self, x1, y1, x2, y2, color='green', color2='yellow',
alpha=1.0, linewidth=1, linestyle='solid',
showcap=True, showplumb=True, units='arcmin',
font='Sans Serif', fontsize=None, **kwdargs):
self.kind = 'ruler'
CanvasObjectBase.__init__(self, color=color, color2=color2,
alpha=alpha, units=units,
showplumb=showplumb,
linewidth=linewidth, showcap=showcap,
linestyle=linestyle,
x1=x1, y1=y1, x2=x2, y2=y2,
font=font, fontsize=fontsize,
**kwdargs)
TwoPointMixin.__init__(self)
def get_points(self):
points = [(self.x1, self.y1), (self.x2, self.y2)]
points = self.get_data_points(points=points)
return points
def select_contains(self, viewer, data_x, data_y):
points = self.get_points()
x1, y1 = points[0]
x2, y2 = points[1]
return self.within_line(viewer, data_x, data_y, x1, y1, x2, y2,
self.cap_radius)
def get_ruler_distances(self, viewer):
mode = self.units.lower()
points = self.get_points()
x1, y1 = points[0]
x2, y2 = points[1]
try:
image = viewer.get_image()
if mode in ('arcmin', 'degrees'):
# Calculate RA and DEC for the three points
# origination point
ra_org, dec_org = image.pixtoradec(x1, y1)
# destination point
ra_dst, dec_dst = image.pixtoradec(x2, y2)
# "heel" point making a right triangle
ra_heel, dec_heel = image.pixtoradec(x2, y1)
if mode == 'arcmin':
text_h = wcs.get_starsep_RaDecDeg(ra_org, dec_org,
ra_dst, dec_dst)
text_x = wcs.get_starsep_RaDecDeg(ra_org, dec_org,
ra_heel, dec_heel)
text_y = wcs.get_starsep_RaDecDeg(ra_heel, dec_heel,
ra_dst, dec_dst)
else:
sep_h = wcs.deltaStarsRaDecDeg(ra_org, dec_org,
ra_dst, dec_dst)
text_h = str(sep_h)
sep_x = wcs.deltaStarsRaDecDeg(ra_org, dec_org,
ra_heel, dec_heel)
text_x = str(sep_x)
sep_y = wcs.deltaStarsRaDecDeg(ra_heel, dec_heel,
ra_dst, dec_dst)
text_y = str(sep_y)
else:
dx = abs(x2 - x1)
dy = abs(y2 - y1)
dh = math.sqrt(dx**2 + dy**2)
text_x = str(dx)
text_y = str(dy)
text_h = ("%.3f" % dh)
except Exception as e:
text_h = 'BAD WCS'
text_x = 'BAD WCS'
text_y = 'BAD WCS'
return (text_x, text_y, text_h)
def draw(self, viewer):
points = self.get_points()
x1, y1 = points[0]
x2, y2 = points[1]
cx1, cy1 = self.canvascoords(viewer, x1, y1)
cx2, cy2 = self.canvascoords(viewer, x2, y2)
text_x, text_y, text_h = self.get_ruler_distances(viewer)
cr = viewer.renderer.setup_cr(self)
cr.set_font_from_shape(self)
cr.draw_line(cx1, cy1, cx2, cy2)
self.draw_arrowhead(cr, cx1, cy1, cx2, cy2)
self.draw_arrowhead(cr, cx2, cy2, cx1, cy1)
# calculate offsets and positions for drawing labels
# try not to cover anything up
xtwd, xtht = cr.text_extents(text_x)
ytwd, ytht = cr.text_extents(text_y)
htwd, htht = cr.text_extents(text_h)
diag_xoffset = 0
diag_yoffset = 0
xplumb_yoffset = 0
yplumb_xoffset = 0
diag_yoffset = 14
if abs(cy1 - cy2) < 5:
show_angle = 0
elif cy1 < cy2:
xplumb_yoffset = -4
else:
xplumb_yoffset = 14
diag_yoffset = -4
if abs(cx1 - cx2) < 5:
diag_xoffset = -(4 + htwd)
show_angle = 0
elif (cx1 < cx2):
diag_xoffset = -(4 + htwd)
yplumb_xoffset = 4
else:
diag_xoffset = 4
yplumb_xoffset = -(4 + ytwd)
xh = min(cx1, cx2); y = cy1 + xplumb_yoffset
xh += (max(cx1, cx2) - xh) // 2
yh = min(cy1, cy2); x = cx2 + yplumb_xoffset
yh += (max(cy1, cy2) - yh) // 2
xd = xh + diag_xoffset
yd = yh + diag_yoffset
cr.draw_text(xd, yd, text_h)
if self.showplumb:
if self.color2:
alpha = getattr(self, 'alpha', 1.0)
cr.set_line(self.color2, alpha=alpha, style='dash')
# draw X plumb line
cr.draw_line(cx1, cy1, cx2, cy1)
# draw Y plumb line
cr.draw_line(cx2, cy1, cx2, cy2)
# draw X plumb line label
xh -= xtwd // 2
cr.draw_text(xh, y, text_x)
# draw Y plumb line label
cr.draw_text(x, yh, text_y)
if self.showcap:
self.draw_caps(cr, self.cap, ((cx2, cy1), ))
class Compass(OnePointOneRadiusMixin, CanvasObjectBase):
"""Draws a WCS compass on a DrawingCanvas.
Parameters are:
x, y: 0-based coordinates of the center in the data space
radius: radius of the compass arms, in data units
Optional parameters for linesize, color, etc.
"""
@classmethod
def get_params_metadata(cls):
return [
## Param(name='coord', type=str, default='data',
## valid=['data'],
## description="Set type of coordinates"),
Param(name='x', type=float, default=0.0, argpos=0,
description="X coordinate of center of object"),
Param(name='y', type=float, default=0.0, argpos=1,
description="Y coordinate of center of object"),
Param(name='radius', type=float, default=1.0, argpos=2,
min=0.0,
description="Radius of object"),
Param(name='linewidth', type=int, default=1,
min=1, max=20, widget='spinbutton', incr=1,
description="Width of outline"),
Param(name='linestyle', type=str, default='solid',
valid=['solid', 'dash'],
description="Style of outline (default solid)"),
Param(name='color',
valid=colors_plus_none, type=_color, default='skyblue',
description="Color of outline"),
Param(name='alpha', type=float, default=1.0,
min=0.0, max=1.0, widget='spinfloat', incr=0.05,
description="Opacity of outline"),
Param(name='font', type=str, default='Sans Serif',
description="Font family for text"),
Param(name='fontsize', type=int, default=None,
min=8, max=72,
description="Font size of text (default: vary by scale)"),
Param(name='showcap', type=_bool,
default=False, valid=[False, True],
description="Show caps for this object"),
]
@classmethod
def idraw(cls, canvas, cxt):
radius = max(abs(cxt.start_x - cxt.x), abs(cxt.start_y - cxt.y))
return cls(cxt.start_x, cxt.start_y, radius, **cxt.drawparams)
def __init__(self, x, y, radius, color='skyblue',
linewidth=1, fontsize=None, font='Sans Serif',
alpha=1.0, linestyle='solid', showcap=True, **kwdargs):
self.kind = 'compass'
CanvasObjectBase.__init__(self, color=color, alpha=alpha,
linewidth=linewidth, showcap=showcap,
linestyle=linestyle,
x=x, y=y, radius=radius,
font=font, fontsize=fontsize,
**kwdargs)
OnePointOneRadiusMixin.__init__(self)
def get_points(self):
# TODO: this attribute will be deprecated--fix!
viewer = self.viewer
image = viewer.get_image()
x, y, xn, yn, xe, ye = image.calc_compass_radius(self.x,
self.y,
self.radius)
return [(x, y), (xn, yn), (xe, ye)]
def get_edit_points(self, viewer):
c_pt, n_pt, e_pt = self.get_points()
return [ MovePoint(*c_pt), ScalePoint(*n_pt), ScalePoint(*e_pt) ]
def set_edit_point(self, i, pt, detail):
if i == 0:
x, y = pt
self.move_to(x, y)
elif i in (1, 2):
x, y = pt
self.radius = max(abs(x - self.x), abs(y - self.y))
else:
raise ValueError("No point corresponding to index %d" % (i))
def select_contains(self, viewer, data_x, data_y):
xd, yd = self.crdmap.to_data(self.x, self.y)
return self.within_radius(viewer, data_x, data_y, xd, yd,
self.cap_radius)
def draw(self, viewer):
(cx1, cy1), (cx2, cy2), (cx3, cy3) = self.get_cpoints(viewer)
cr = viewer.renderer.setup_cr(self)
cr.set_font_from_shape(self)
# draw North line and arrowhead
cr.draw_line(cx1, cy1, cx2, cy2)
self.draw_arrowhead(cr, cx1, cy1, cx2, cy2)
# draw East line and arrowhead
cr.draw_line(cx1, cy1, cx3, cy3)
self.draw_arrowhead(cr, cx1, cy1, cx3, cy3)
# draw "N" & "E"
cx, cy = self.get_textpos(cr, 'N', cx1, cy1, cx2, cy2)
cr.draw_text(cx, cy, 'N')
cx, cy = self.get_textpos(cr, 'E', cx1, cy1, cx3, cy3)
cr.draw_text(cx, cy, 'E')
if self.showcap:
self.draw_caps(cr, self.cap, ((cx1, cy1), ))
def get_textpos(self, cr, text, cx1, cy1, cx2, cy2):
htwd, htht = cr.text_extents(text)
diag_xoffset = 0
diag_yoffset = 0
xplumb_yoffset = 0
yplumb_xoffset = 0
diag_yoffset = 14
if abs(cy1 - cy2) < 5:
pass
elif cy1 < cy2:
xplumb_yoffset = -4
else:
xplumb_yoffset = 14
diag_yoffset = -4
if abs(cx1 - cx2) < 5:
diag_xoffset = -(4 + htwd)
elif (cx1 < cx2):
diag_xoffset = -(4 + htwd)
yplumb_xoffset = 4
else:
diag_xoffset = 4
yplumb_xoffset = -(4 + 0)
xh = min(cx1, cx2); y = cy1 + xplumb_yoffset
xh += (max(cx1, cx2) - xh) // 2
yh = min(cy1, cy2); x = cx2 + yplumb_xoffset
yh += (max(cy1, cy2) - yh) // 2
xd = xh + diag_xoffset
yd = yh + diag_yoffset
return (xd, yd)
class Crosshair(OnePointMixin, CanvasObjectBase):
"""Draws a crosshair on a DrawingCanvas.
Parameters are:
x, y: 0-based coordinates of the center in the data space
Optional parameters for linesize, color, etc.
"""
@classmethod
def get_params_metadata(cls):
return [
## Param(name='coord', type=str, default='data',
## valid=['data'],
## description="Set type of coordinates"),
Param(name='x', type=float, default=0.0, argpos=0,
description="X coordinate of center of object"),
Param(name='y', type=float, default=0.0, argpos=1,
description="Y coordinate of center of object"),
Param(name='linewidth', type=int, default=1,
min=1, max=20, widget='spinbutton', incr=1,
description="Width of outline"),
Param(name='linestyle', type=str, default='solid',
valid=['solid', 'dash'],
description="Style of outline (default solid)"),
Param(name='color',
valid=colors_plus_none, type=_color, default='green',
description="Color of outline"),
Param(name='alpha', type=float, default=1.0,
min=0.0, max=1.0, widget='spinfloat', incr=0.05,
description="Opacity of outline"),
Param(name='text', type=str, default=None,
description="Text annotation"),
Param(name='textcolor',
valid=colors_plus_none, type=_color, default='yellow',
description="Color of text annotation"),
Param(name='font', type=str, default='Sans Serif',
description="Font family for text"),
Param(name='fontsize', type=int, default=None,
min=8, max=72,
description="Font size of text (default: vary by scale)"),
Param(name='format', type=str, default='xy',
valid=['xy', 'value', 'coords'],
description="Format for text annotation (default: xy)"),
]
@classmethod
def idraw(cls, canvas, cxt):
return cls(cxt.x, cxt.y, **cxt.drawparams)
def __init__(self, x, y, color='green',
linewidth=1, alpha=1.0, linestyle='solid',
text=None, textcolor='yellow',
fontsize=None, font='Sans Serif', format='xy',
**kwdargs):
self.kind = 'crosshair'
CanvasObjectBase.__init__(self, color=color, alpha=alpha,
linewidth=linewidth, linestyle=linestyle,
text=text, textcolor=textcolor,
fontsize=fontsize, font=font,
x=x, y=y, format=format, **kwdargs)
OnePointMixin.__init__(self)
def select_contains(self, viewer, data_x, data_y):
xd, yd = self.crdmap.to_data(self.x, self.y)
return self.within_radius(viewer, data_x, data_y, xd, yd,
self.cap_radius)
def draw(self, viewer):
wd, ht = viewer.get_window_size()
cpoints = self.get_cpoints(viewer)
(cx, cy) = cpoints[0]
hx1, hx2 = 0, wd
hy1 = hy2 = cy
vy1, vy2 = 0, ht
vx1 = vx2 = cx
if self.text is None:
if self.format == 'xy':
text = "X:%f, Y:%f" % (self.x, self.y)
else:
image = viewer.get_image()
# NOTE: x, y are assumed to be in data coordinates
info = image.info_xy(self.x, self.y, viewer.get_settings())
if self.format == 'coords':
text = "%s:%s, %s:%s" % (info.ra_lbl, info.ra_txt,
info.dec_lbl, info.dec_txt)
else:
text = "V: %f" % (info.value)
else:
text = self.text
cr = viewer.renderer.setup_cr(self)
cr.set_font_from_shape(self)
# draw horizontal line
cr.draw_line(hx1, hy1, hx2, hy2)
# draw vertical line
cr.draw_line(vx1, vy1, vx2, vy2)
txtwd, txtht = cr.text_extents(text)
cr.set_line(self.textcolor, alpha=self.alpha)
cr.draw_text(cx+10, cy+4+txtht, text)
class AnnulusMixin(object):
def contains(self, x, y):
"""Containment test."""
obj1, obj2 = self.objects
return obj2.contains(x, y) and numpy.logical_not(obj1.contains(x, y))
def contains_arr(self, x_arr, y_arr):
"""Containment test on arrays."""
obj1, obj2 = self.objects
arg1 = obj2.contains_arr(x_arr, y_arr)
arg2 = numpy.logical_not(obj1.contains_arr(x_arr, y_arr))
return numpy.logical_and(arg1, arg2)
def get_llur(self):
"""Bounded by outer object."""
obj2 = self.objects[1]
return obj2.get_llur()
def select_contains(self, viewer, data_x, data_y):
obj2 = self.objects[1]
return obj2.select_contains(viewer, data_x, data_y)
class Annulus(AnnulusMixin, OnePointOneRadiusMixin, CompoundObject):
"""Special compound object to handle annulus shape that
consists of two objects with the same centroid.
Examples
--------
>>> tag = canvas.add(Annulus(100, 200, 10, width=5, atype='circle'))
>>> obj = canvas.getObjectByTag(tag)
>>> arr_masked = image.cutout_shape(obj)
"""
@classmethod
def get_params_metadata(cls):
return [
## Param(name='coord', type=str, default='data',
## valid=['data', 'wcs'],
## description="Set type of coordinates"),
Param(name='x', type=float, default=0.0, argpos=0,
description="X coordinate of center of object"),
Param(name='y', type=float, default=0.0, argpos=1,
description="Y coordinate of center of object"),
Param(name='radius', type=float, default=1.0, argpos=2,
min=0.0,
description="Inner radius of annulus"),
Param(name='width', type=float, default=None,
min=0.0,
description="Width of annulus"),
Param(name='atype', type=str, default='circle',
valid=['circle', 'squarebox'],
description="Type of annulus"),
Param(name='linewidth', type=int, default=1,
min=1, max=20, widget='spinbutton', incr=1,
description="Width of outline"),
Param(name='linestyle', type=str, default='solid',
valid=['solid', 'dash'],
description="Style of outline (default solid)"),
Param(name='color',
valid=colors_plus_none, type=_color, default='yellow',
description="Color of outline"),
Param(name='alpha', type=float, default=1.0,
min=0.0, max=1.0, widget='spinfloat', incr=0.05,
description="Opacity of outline"),
]
@classmethod
def idraw(cls, canvas, cxt):
radius = math.sqrt(abs(cxt.start_x - cxt.x)**2 +
abs(cxt.start_y - cxt.y)**2 )
return cls(cxt.start_x, cxt.start_y, radius,
**cxt.drawparams)
def __init__(self, x, y, radius, width=None,
atype='circle', color='yellow',
linewidth=1, linestyle='solid', alpha=1.0,
**kwdargs):
if width is None:
# default width is 15% of radius
width = 0.15 * radius
oradius = radius + width
if oradius < radius:
raise ValueError('Outer boundary < inner boundary')
coord = kwdargs.get('coord', None)
klass = get_canvas_type(atype)
obj1 = klass(x, y, radius, color=color,
linewidth=linewidth,
linestyle=linestyle, alpha=alpha,
coord=coord)
obj1.editable = False
obj2 = klass(x, y, oradius, color=color,
linewidth=linewidth,
linestyle=linestyle, alpha=alpha,
coord=coord)
obj2.editable = False
CompoundObject.__init__(self, obj1, obj2,
x=x, y=y, radius=radius,
width=width, color=color,
linewidth=linewidth, linestyle=linestyle,
alpha=alpha, **kwdargs)
OnePointOneRadiusMixin.__init__(self)
self.editable = True
self.opaque = True
self.kind = 'annulus'
def get_edit_points(self, viewer):
points = ((self.x, self.y),
self.crdmap.offset_pt((self.x, self.y),
self.radius, 0),
self.crdmap.offset_pt((self.x, self.y),
self.radius + self.width, 0),
)
points = self.get_data_points(points=points)
return [MovePoint(*points[0]),
ScalePoint(*points[1]),
Point(*points[2])]
def setup_edit(self, detail):
detail.center_pos = self.get_center_pt()
detail.radius = self.radius
detail.width = self.width
def set_edit_point(self, i, pt, detail):
if i == 0:
# move control point
x, y = pt
self.move_to(x, y)
else:
if i == 1:
scalef = self.calc_scale_from_pt(pt, detail)
# inner obj radius control pt
self.radius = detail.radius * scalef
elif i == 2:
scalef = self.calc_scale_from_pt(pt, detail)
width = detail.width * scalef
# outer obj radius control pt--calculate new width
if width <= 0:
raise ValueError("Must have a positive width")
self.width = width
else:
raise ValueError("No point corresponding to index %d" % (i))
self.sync_state()
def sync_state(self):
"""Called to synchronize state (e.g. when parameters have changed).
"""
oradius = self.radius + self.width
if oradius < self.radius:
raise ValueError('Outer boundary < inner boundary')
d = dict(x=self.x, y=self.y, radius=self.radius, color=self.color,
linewidth=self.linewidth, linestyle=self.linestyle,
alpha=self.alpha)
# update inner object
self.objects[0].__dict__.update(d)
# update outer object
d['radius'] = oradius
self.objects[1].__dict__.update(d)
def move_to(self, xdst, ydst):
super(Annulus, self).move_to(xdst, ydst)
self.set_data_points([(xdst, ydst)])
register_canvas_types(dict(ruler=Ruler, compass=Compass,
crosshair=Crosshair, annulus=Annulus))
#END
|
|
"""
Module to set up run time parameters for Clawpack -- AMRClaw code.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
#probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
#------------------------------------------------------------------
# GeoClaw specific parameters:
#------------------------------------------------------------------
rundata = setgeo(rundata)
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = 204.905 # xlower
clawdata.upper[0] = 204.965 # xupper
clawdata.lower[1] = 19.71 # ylower
clawdata.upper[1] = 19.758 # yupper
# Number of grid cells:
clawdata.num_cells[0] = 108 # 2-sec # mx
clawdata.num_cells[1] = 88 # my
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# Note: If restarting, you must also change the Makefile to set:
# RESTART = True
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 14
clawdata.tfinal = 7*3600.
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = 3600. * np.linspace(1,4,97)
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 1
clawdata.total_steps = 10
clawdata.output_t0 = False # output at initial (or restart) time?
clawdata.output_format = 'binary' # 'ascii', 'binary', 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'none' # could be list
clawdata.output_aux_onlyonce = True # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 0.016
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.75
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 50000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter
clawdata.limiter = ['vanleer', 'vanleer', 'vanleer']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 1
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap' # at xlower
clawdata.bc_upper[0] = 'extrap' # at xupper
clawdata.bc_lower[1] = 'extrap' # at ylower
clawdata.bc_upper[1] = 'user' # at yupper
# ---------------
# Gauges:
# ---------------
gauges = rundata.gaugedata.gauges
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
gauges.append([1125, 204.91802, 19.74517, 0., 1.e9]) #Hilo
gauges.append([1126, 204.93003, 19.74167, 0., 1.e9]) #Hilo
# gauges.append([11261, 204.93003, 19.739, 0., 1.e9])
# #Hilo
# Tide gauge:
gauges.append([7760, 204.9437, 19.7306, 0., 1.e9]) # Hilo
gauges.append([7761, 204.9447, 19.7308, 0., 1.e9]) # From Benchmark descr.
gauges.append([7762, 204.9437, 19.7307, 0., 1.e9]) # Shift so depth > 0
# Gauge at point requested by Pat Lynett:
gauges.append([3333, 204.93, 19.7576, 0., 1.e9])
if 0:
# Array of synthetic gauges originally used to find S2 location:
dx = .0005
for i in range(6):
x = 204.93003 - i*dx
for j in range(5):
y = 19.74167 + (j-2)*dx
gauges.append([10*(j+1)+i+1, x, y, 0., 1.e9])
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = np.array([7.5,8,8.5,9,9.5]) * 3600.
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters: (written to amr.data)
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 3
# List of refinement ratios at each level (length at least amr_level_max-1)
amrdata.refinement_ratios_x = [2,3]
amrdata.refinement_ratios_y = [2,3]
amrdata.refinement_ratios_t = [2,3]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length num_aux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center', 'capacity', 'yleft']
# Flag for refinement based on Richardson error estimater:
amrdata.flag_richardson = False # use Richardson?
amrdata.flag_richardson_tol = 1.0 # Richardson tolerance
# Flag for refinement using routine flag2refine:
amrdata.flag2refine = True # use this?
amrdata.flag2refine_tol = 0.5 # tolerance used in this routine
# Note: in geoclaw the refinement tolerance is set as wave_tolerance below
# and flag2refine_tol is unused!
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.7
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ---------------
# Regions:
# ---------------
regions = rundata.regiondata.regions
regions.append([1, 1, 0., 1e9, 0, 360, -90, 90])
regions.append([1, 2, 0., 1e9, 204.9, 204.95, 19.7, 19.754])
regions.append([1, 3, 0., 1e9, 204.9, 204.95, 19.7, 19.751])
regions.append([1, 4, 0., 1e9, 204.9, 204.95, 19.72, 19.748])
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
return rundata
# end of function setrun
# ----------------------
#-------------------
def setgeo(rundata):
#-------------------
"""
Set GeoClaw specific runtime parameters.
"""
try:
geo_data = rundata.geo_data
except AttributeError:
print("*** Error, this rundata has no geo_data attribute")
raise AttributeError("Missing geo_data attribute")
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367500.0
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
geo_data.sea_level = 0.
geo_data.dry_tolerance = 0.001
geo_data.friction_forcing = True
geo_data.manning_coefficient = 0.025
geo_data.friction_depth = 500.0
# Refinement settings
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.02
refinement_data.deep_depth = 200.0
refinement_data.max_level_deep = 4
# == settopo.data values ==
topofiles = rundata.topo_data.topofiles
topodir = '../'
topofiles.append([2, 1, 1, 0.0, 1e10, topodir+'hilo_flattened.tt2'])
topofiles.append([2, 1, 1, 0.0, 1e10, topodir+'flat.tt2'])
# == setdtopo.data values ==
#rundata.dtopo_data.dtopofiles = [[1, 3, 3, topodir + 'Fujii.txydz']]
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
# == fixedgrids.data values ==
rundata.fixed_grid_data.fixedgrids = []
fixedgrids = rundata.fixed_grid_data.fixedgrids
# == fgmax.data values ==
fgmax_files = rundata.fgmax_data.fgmax_files
# for fixed grids append to this list names of any fgmax input files
fgmax_files.append('fgmax_grid.txt')
rundata.fgmax_data.num_fgmax_val = 2
return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
from clawpack.geoclaw import kmltools
kmltools.regions2kml()
kmltools.gauges2kml()
|
|
# Copyright (c) 2018-2021 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import datetime
import glob
import inspect
import re
import sys
import traceback
from decimal import Decimal
from io import IOBase
from tempfile import NamedTemporaryFile, SpooledTemporaryFile, TemporaryFile
from uuid import UUID
from collections import OrderedDict
# _TemporaryFileWrapper is an undocumented implementation detail, so
# import defensively.
try:
from tempfile import _TemporaryFileWrapper
except ImportError:
_TemporaryFileWrapper = None
import six
# noinspection PyUnresolvedReferences,PyCompatibility
from six import binary_type, text_type, string_types, integer_types, BytesIO, StringIO
from six.moves import zip
from .. import errors, os_utils
from ..compat import as_text
from ..vertica import messages
from ..vertica.column import Column
# A note regarding support for temporary files:
#
# Since Python 2.6, the tempfile module offers three kinds of temporary
# files:
#
# * NamedTemporaryFile
# * SpooledTemporaryFile
# * TemporaryFile
#
# NamedTemporaryFile is not a class, but a function that returns
# an instance of the tempfile._TemporaryFileWrapper class.
# _TemporaryFileWrapper is a direct subclass of object.
#
# * https://github.com/python/cpython/blob/v3.8.0/Lib/tempfile.py#L546
# * https://github.com/python/cpython/blob/v3.8.0/Lib/tempfile.py#L450
#
# SpooledTemporaryFile is a class that is a direct subclass of object.
#
# * https://github.com/python/cpython/blob/v3.8.0/Lib/tempfile.py#L623
#
# TemporaryFile is a class that is either NamedTemporaryFile or an
# indirect subclass of io.IOBase, depending on the platform.
#
# * https://bugs.python.org/issue33762
# * https://github.com/python/cpython/blob/v3.8.0/Lib/tempfile.py#L552-L555
# * https://github.com/python/cpython/blob/v3.8.0/Lib/tempfile.py#L606-L608
# * https://github.com/python/cpython/blob/v3.8.0/Lib/tempfile.py#L617-L618
#
# As a result, for Python 2.6 and newer, it seems the best way to test
# for a file-like object inclusive of temporary files is via:
#
# isinstance(obj, (IOBase, SpooledTemporaryFile, _TemporaryFileWrapper))
# Of the following "types", only include those that are classes in
# file_type so that isinstance(obj, file_type) won't fail. As of Python
# 3.8 only IOBase, SpooledTemporaryFile and _TemporaryFileWrapper are
# classes, but if future Python versions implement NamedTemporaryFile
# and TemporaryFile as classes, the following code should account for
# that accordingly.
file_type = tuple(
type_ for type_ in [
IOBase,
NamedTemporaryFile,
SpooledTemporaryFile,
TemporaryFile,
_TemporaryFileWrapper,
]
if inspect.isclass(type_)
)
if six.PY2:
# noinspection PyUnresolvedReferences
file_type = file_type + (file,)
RE_NAME_BASE = u"[0-9a-zA-Z_][\\w\\d\\$_]*"
RE_NAME = u'(("{0}")|({0}))'.format(RE_NAME_BASE)
RE_BASIC_INSERT_STAT = (
u"\\s*INSERT\\s+INTO\\s+(?P<target>({0}\\.)?{0})"
u"\\s*\\(\\s*(?P<variables>{0}(\\s*,\\s*{0})*)\\s*\\)"
u"\\s+VALUES\\s*\\(\\s*(?P<values>(.|\\s)*)\\s*\\)").format(RE_NAME)
END_OF_RESULT_RESPONSES = (messages.CommandComplete, messages.PortalSuspended)
END_OF_BATCH_RESPONSES = (messages.WriteFile, messages.EndOfBatchResponse)
DEFAULT_BUFFER_SIZE = 131072
class Cursor(object):
# NOTE: this is used in executemany and is here for pandas compatibility
_insert_statement = re.compile(RE_BASIC_INSERT_STAT, re.U | re.I)
def __init__(self, connection, logger, cursor_type=None, unicode_error=None):
self.connection = connection
self._logger = logger
self.cursor_type = cursor_type
self.unicode_error = unicode_error if unicode_error is not None else 'strict'
self._closed = False
self._message = None
self.operation = None
self.prepared_sql = None # the last statement that was prepared
self.prepared_name = "s0"
self.error = None
self._sql_literal_adapters = {}
#
# dbapi attributes
#
self.description = None
self.rowcount = -1
self.arraysize = 1
#############################################
# supporting `with` statements
#############################################
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
#############################################
# dbapi methods
#############################################
# noinspection PyMethodMayBeStatic
def callproc(self, procname, parameters=None):
raise errors.NotSupportedError('Cursor.callproc() is not implemented')
def close(self):
self._logger.info('Close the cursor')
if not self.closed() and self.prepared_sql:
self._close_prepared_statement()
self._closed = True
def execute(self, operation, parameters=None, use_prepared_statements=None,
copy_stdin=None, buffer_size=DEFAULT_BUFFER_SIZE):
if self.closed():
raise errors.InterfaceError('Cursor is closed')
self.flush_to_query_ready()
operation = as_text(operation)
self.operation = operation
self.rowcount = -1
if copy_stdin is None:
self.copy_stdin_list = []
elif (isinstance(copy_stdin, list) and
all(callable(getattr(i, 'read', None)) for i in copy_stdin)):
self.copy_stdin_list = copy_stdin
elif callable(getattr(copy_stdin, 'read', None)):
self.copy_stdin_list = [copy_stdin]
else:
raise TypeError("Cursor.execute 'copy_stdin' parameter should be"
" a file-like object or a list of file-like objects")
self.buffer_size = buffer_size # For copy-local read and write
use_prepared = bool(self.connection.options['use_prepared_statements']
if use_prepared_statements is None else use_prepared_statements)
if use_prepared:
# Execute the SQL as prepared statement (server-side bindings)
if parameters and not isinstance(parameters, (list, tuple)):
raise TypeError("Execute parameters should be a list/tuple")
# If the SQL has not been prepared, prepare the SQL
if operation != self.prepared_sql:
self._prepare(operation)
self.prepared_sql = operation # the prepared statement is kept
# Bind the parameters and execute
self._execute_prepared_statement([parameters])
else:
# Execute the SQL directly (client-side bindings)
if parameters:
operation = self.format_operation_with_parameters(operation, parameters)
self._execute_simple_query(operation)
return self
def executemany(self, operation, seq_of_parameters, use_prepared_statements=None):
if not isinstance(seq_of_parameters, (list, tuple)):
raise TypeError("seq_of_parameters should be list/tuple")
if self.closed():
raise errors.InterfaceError('Cursor is closed')
self.flush_to_query_ready()
operation = as_text(operation)
self.operation = operation
use_prepared = bool(self.connection.options['use_prepared_statements']
if use_prepared_statements is None else use_prepared_statements)
if use_prepared:
# Execute the SQL as prepared statement (server-side bindings)
if len(seq_of_parameters) == 0:
raise ValueError("seq_of_parameters should not be empty")
if not all(isinstance(elem, (list, tuple)) for elem in seq_of_parameters):
raise TypeError("Each seq_of_parameters element should be a list/tuple")
# If the SQL has not been prepared, prepare the SQL
if operation != self.prepared_sql:
self._prepare(operation)
self.prepared_sql = operation # the prepared statement is kept
# Bind the parameters and execute
self._execute_prepared_statement(seq_of_parameters)
else:
m = self._insert_statement.match(operation)
if m:
target = as_text(m.group('target'))
variables = as_text(m.group('variables'))
variables = ",".join([variable.strip().strip('"') for variable in variables.split(",")])
values = as_text(m.group('values'))
values = ",".join([value.strip().strip('"') for value in values.split(",")])
seq_of_values = [self.format_operation_with_parameters(values, parameters, is_copy_data=True)
for parameters in seq_of_parameters]
data = "\n".join(seq_of_values)
copy_statement = (
u"COPY {0} ({1}) FROM STDIN DELIMITER ',' ENCLOSED BY '\"' "
u"ENFORCELENGTH ABORT ON ERROR{2}").format(target, variables,
" NO COMMIT" if not self.connection.autocommit else '')
self.copy(copy_statement, data)
else:
raise NotImplementedError(
"executemany is implemented for simple INSERT statements only")
def fetchone(self):
while True:
if isinstance(self._message, messages.DataRow):
if self.rowcount == -1:
self.rowcount = 1
else:
self.rowcount += 1
row = self.row_formatter(self._message)
# fetch next message
self._message = self.connection.read_message()
return row
elif isinstance(self._message, messages.RowDescription):
self.description = [Column(fd, self.unicode_error) for fd in self._message.fields]
elif isinstance(self._message, messages.ReadyForQuery):
return None
elif isinstance(self._message, END_OF_RESULT_RESPONSES):
return None
elif isinstance(self._message, messages.EmptyQueryResponse):
pass
elif isinstance(self._message, messages.VerifyFiles):
self._handle_copy_local_protocol()
elif isinstance(self._message, messages.EndOfBatchResponse):
pass
elif isinstance(self._message, messages.CopyDoneResponse):
pass
elif isinstance(self._message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(self._message, self.operation)
else:
raise errors.MessageError('Unexpected fetchone() state: {}'.format(
type(self._message).__name__))
self._message = self.connection.read_message()
def fetchmany(self, size=None):
if not size:
size = self.arraysize
results = []
while True:
row = self.fetchone()
if not row:
break
results.append(row)
if len(results) >= size:
break
return results
def fetchall(self):
return list(self.iterate())
def nextset(self):
"""
Skip to the next available result set, discarding any remaining rows
from the current result set.
If there are no more result sets, this method returns False. Otherwise,
it returns a True and subsequent calls to the fetch*() methods will
return rows from the next result set.
"""
# skip any data for this set if exists
self.flush_to_end_of_result()
if self._message is None:
return False
elif isinstance(self._message, END_OF_RESULT_RESPONSES):
# there might be another set, read next message to find out
self._message = self.connection.read_message()
if isinstance(self._message, messages.RowDescription):
self.description = [Column(fd, self.unicode_error) for fd in self._message.fields]
self._message = self.connection.read_message()
if isinstance(self._message, messages.VerifyFiles):
self._handle_copy_local_protocol()
self.rowcount = -1
return True
elif isinstance(self._message, messages.BindComplete):
self._message = self.connection.read_message()
self.rowcount = -1
return True
elif isinstance(self._message, messages.ReadyForQuery):
return False
elif isinstance(self._message, END_OF_RESULT_RESPONSES):
# result of a DDL/transaction
self.rowcount = -1
return True
elif isinstance(self._message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(self._message, self.operation)
else:
raise errors.MessageError(
'Unexpected nextset() state after END_OF_RESULT_RESPONSES: {0}'.format(self._message))
elif isinstance(self._message, messages.ReadyForQuery):
# no more sets left to be read
return False
else:
raise errors.MessageError('Unexpected nextset() state: {0}'.format(self._message))
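# Illustrative usage of nextset() (assumes 'cur' executed an operation that
# produced more than one result set):
#   rows_first = cur.fetchall()
#   while cur.nextset():
#       rows_next = cur.fetchall()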
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
#############################################
# non-dbapi methods
#############################################
def closed(self):
return self._closed or self.connection.closed()
def cancel(self):
# Cancel is a session-level operation, cursor-level API does not make
# sense. Keep this API for backward compatibility.
raise errors.NotSupportedError(
'Cursor.cancel() is deprecated. Call Connection.cancel() '
'to cancel the current database operation.')
def iterate(self):
row = self.fetchone()
while row:
yield row
row = self.fetchone()
def copy(self, sql, data, **kwargs):
"""
EXAMPLE:
        >>> with open("/tmp/file.csv", "rb") as fs:
        >>>     cursor.copy("COPY table(field1,field2) FROM STDIN DELIMITER ',' ENCLOSED BY ''''",
        >>>                 fs, buffer_size=65536)
"""
sql = as_text(sql)
if self.closed():
raise errors.InterfaceError('Cursor is closed')
self.flush_to_query_ready()
if isinstance(data, binary_type):
stream = BytesIO(data)
elif isinstance(data, text_type):
stream = StringIO(data)
elif isinstance(data, file_type) or callable(getattr(data, 'read', None)):
stream = data
else:
            raise TypeError("Invalid type of data {0}".format(type(data)))
# TODO: check sql is a valid `COPY FROM STDIN` SQL statement
self._logger.info(u'Execute COPY statement: [{}]'.format(sql))
# Execute a `COPY FROM STDIN` SQL statement
self.connection.write(messages.Query(sql))
buffer_size = kwargs['buffer_size'] if 'buffer_size' in kwargs else DEFAULT_BUFFER_SIZE
while True:
message = self.connection.read_message()
self._message = message
if isinstance(message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(message, sql)
elif isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.CommandComplete):
pass
elif isinstance(message, messages.CopyInResponse):
try:
self._send_copy_data(stream, buffer_size)
except Exception as e:
# COPY termination: report the cause of failure to the backend
self.connection.write(messages.CopyFail(str(e)))
self._logger.error(str(e))
self.flush_to_query_ready()
raise errors.DataError('Failed to send a COPY data stream: {}'.format(str(e)))
# Successful termination for COPY
self.connection.write(messages.CopyDone())
else:
raise errors.MessageError('Unexpected message: {0}'.format(message))
if self.error is not None:
raise self.error
def object_to_sql_literal(self, py_obj):
return self.object_to_string(py_obj, False)
def register_sql_literal_adapter(self, obj_type, adapter_func):
if not callable(adapter_func):
raise TypeError("Cannot register this sql literal adapter. The adapter is not callable.")
self._sql_literal_adapters[obj_type] = adapter_func
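    # For example (illustrative adapter, not registered by default): render
    # Python sets as Vertica ARRAY literals when used as statement parameters.
    #
    #     cursor.register_sql_literal_adapter(
    #         set, lambda s: "ARRAY[" + ",".join(str(v) for v in sorted(s)) + "]")
    #     cursor.object_to_sql_literal({3, 1, 2})  # -> "ARRAY[1,2,3]"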
#############################################
# internal
#############################################
def flush_to_query_ready(self):
        # Read and discard messages until ReadyForQuery, unless the last
        # message is already None or ReadyForQuery
if self._message is None \
or isinstance(self._message, messages.ReadyForQuery):
return
while True:
message = self.connection.read_message()
if isinstance(message, messages.ReadyForQuery):
self._message = message
break
elif isinstance(message, messages.VerifyFiles):
self._message = message
self._handle_copy_local_protocol()
def flush_to_end_of_result(self):
        # Read and discard messages until an end-of-result response arrives,
        # unless the last message is already None, ReadyForQuery, or an
        # end-of-result response
if (self._message is None or
isinstance(self._message, messages.ReadyForQuery) or
isinstance(self._message, END_OF_RESULT_RESPONSES)):
return
while True:
message = self.connection.read_message()
if isinstance(message, END_OF_RESULT_RESPONSES):
self._message = message
break
def row_formatter(self, row_data):
if self.cursor_type is None:
return self.format_row_as_array(row_data)
elif self.cursor_type in (list, 'list'):
return self.format_row_as_array(row_data)
elif self.cursor_type in (dict, 'dict'):
return self.format_row_as_dict(row_data)
else:
raise TypeError('Unrecognized cursor_type: {0}'.format(self.cursor_type))
def format_row_as_dict(self, row_data):
return OrderedDict(
(descr.name, descr.convert(value))
for descr, value in zip(self.description, row_data.values)
)
def format_row_as_array(self, row_data):
return [descr.convert(value)
for descr, value in zip(self.description, row_data.values)]
def object_to_string(self, py_obj, is_copy_data):
"""Return the SQL representation of the object as a string"""
if type(py_obj) in self._sql_literal_adapters and not is_copy_data:
adapter = self._sql_literal_adapters[type(py_obj)]
result = adapter(py_obj)
if not isinstance(result, (string_types, bytes)):
raise TypeError("Unexpected return type of {} adapter: {}, expected a string type."
.format(type(py_obj), type(result)))
return as_text(result)
if isinstance(py_obj, type(None)):
return '' if is_copy_data else 'NULL'
elif isinstance(py_obj, bool):
return str(py_obj)
elif isinstance(py_obj, (string_types, bytes)):
return self.format_quote(as_text(py_obj), is_copy_data)
elif isinstance(py_obj, (integer_types, float, Decimal)):
return str(py_obj)
elif isinstance(py_obj, tuple): # tuple and namedtuple
elements = [None] * len(py_obj)
for i in range(len(py_obj)):
elements[i] = self.object_to_string(py_obj[i], is_copy_data)
return "(" + ",".join(elements) + ")"
elif isinstance(py_obj, (datetime.datetime, datetime.date, datetime.time, UUID)):
return self.format_quote(as_text(str(py_obj)), is_copy_data)
else:
if is_copy_data:
return str(py_obj)
else:
msg = ("Cannot convert {} type object to an SQL string. "
"Please register a new adapter for this type via the "
"Cursor.register_sql_literal_adapter() function."
.format(type(py_obj)))
raise TypeError(msg)
# noinspection PyArgumentList
def format_operation_with_parameters(self, operation, parameters, is_copy_data=False):
operation = as_text(operation)
if isinstance(parameters, dict):
for key, param in six.iteritems(parameters):
if not isinstance(key, string_types):
key = str(key)
key = as_text(key)
value = self.object_to_string(param, is_copy_data)
# Using a regex with word boundary to correctly handle params with similar names
# such as :s and :start
match_str = u":{0}\\b".format(key)
operation = re.sub(match_str, lambda _: value, operation, flags=re.U)
elif isinstance(parameters, (tuple, list)):
tlist = []
for param in parameters:
value = self.object_to_string(param, is_copy_data)
tlist.append(value)
operation = operation % tuple(tlist)
else:
raise TypeError("Argument 'parameters' must be dict or tuple/list")
return operation
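    # For example (illustrative): format_operation_with_parameters(
    #     "SELECT :s, :start", {'s': 1, 'start': 'abc'}) yields "SELECT 1, 'abc'";
    # the \b word boundary keeps ":s" from also rewriting the prefix of ":start".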
def format_quote(self, param, is_copy_data):
if is_copy_data:
return u'"{0}"'.format(re.escape(param))
else:
return u"'{0}'".format(param.replace(u"'", u"''"))
def _execute_simple_query(self, query):
"""
Send the query to the server using the simple query protocol.
Return True if this query contained no SQL (e.g. the string "--comment")
"""
self._logger.info(u'Execute simple query: [{}]'.format(query))
# All of the statements in the query are sent here in a single message
self.connection.write(messages.Query(query))
# The first response could be a number of things:
# ErrorResponse: Something went wrong on the server.
# EmptyQueryResponse: The query being executed is empty.
# RowDescription: This is the "normal" case when executing a query.
# It marks the start of the results.
# CommandComplete: This occurs when executing DDL/transactions.
self._message = self.connection.read_message()
if isinstance(self._message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(self._message, query)
elif isinstance(self._message, messages.RowDescription):
self.description = [Column(fd, self.unicode_error) for fd in self._message.fields]
self._message = self.connection.read_message()
if isinstance(self._message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(self._message, query)
elif isinstance(self._message, messages.VerifyFiles):
self._handle_copy_local_protocol()
def _handle_copy_local_protocol(self):
if self.connection.options['disable_copy_local']:
msg = 'COPY LOCAL operation is disabled.'
self.connection.write(messages.CopyError(msg))
self.flush_to_query_ready()
raise errors.InterfaceError(msg)
# Extract info from VerifyFiles message
input_files = self._message.input_files
rejections_file = self._message.rejections_file
exceptions_file = self._message.exceptions_file
# Verify the file(s) present in the COPY FROM LOCAL statement are indeed accessible
self.valid_write_file_path = []
try:
# Check that the output files are writable
if rejections_file != '':
if rejections_file not in self.operation:
raise errors.MessageError('Server requests for writing to'
' invalid rejected file path: {}'.format(rejections_file))
os_utils.check_file_writable(rejections_file)
self.valid_write_file_path.append(rejections_file)
if exceptions_file != '':
if exceptions_file not in self.operation:
raise errors.MessageError('Server requests for writing to'
' invalid exceptions file path: {}'.format(exceptions_file))
os_utils.check_file_writable(exceptions_file)
self.valid_write_file_path.append(exceptions_file)
# Check that the input files are readable
self.valid_read_file_path = self._check_copy_local_files(input_files)
self.connection.write(messages.VerifiedFiles(self.valid_read_file_path))
except Exception as e:
tb = sys.exc_info()[2]
stk = traceback.extract_tb(tb, 1)
self.connection.write(messages.CopyError(str(e), stk[0]))
self.flush_to_query_ready()
raise
# Server should be ready to receive copy data from STDIN/files
self._message = self.connection.read_message()
if isinstance(self._message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(self._message, self.operation)
elif not isinstance(self._message, (messages.CopyInResponse, messages.LoadFile)):
raise errors.MessageError('Unexpected COPY FROM LOCAL state: {}'.format(
type(self._message).__name__))
try:
if isinstance(self._message, messages.CopyInResponse):
self._logger.info('Sending STDIN data to server')
if len(self.copy_stdin_list) == 0:
raise ValueError('No STDIN source to load. Please specify "copy_stdin" parameter in Cursor.execute()')
stdin = self.copy_stdin_list.pop(0)
self._send_copy_data(stdin, self.buffer_size)
self.connection.write(messages.EndOfBatchRequest())
self._read_copy_data_response(is_stdin_copy=True)
elif isinstance(self._message, messages.LoadFile):
while True:
self._send_copy_file_data()
if not self._read_copy_data_response():
break
except Exception as e:
tb = sys.exc_info()[2]
stk = traceback.extract_tb(tb, 1)
self.connection.write(messages.CopyError(str(e), stk[0]))
self.flush_to_query_ready()
raise
def _check_copy_local_files(self, input_files):
# Return an empty list when the copy input is STDIN
if len(input_files) == 0:
return []
file_list = []
for file_pattern in input_files:
if file_pattern not in self.operation:
raise errors.MessageError('Server requests for loading invalid'
' file: {}, Query: {}'.format(file_pattern, self.operation))
# Expand the glob patterns
expanded_files = glob.glob(file_pattern)
if len(expanded_files) == 0:
raise OSError('{} does not exist'.format(file_pattern))
# Check file permissions
for f in expanded_files:
os_utils.check_file_readable(f)
file_list.append(f)
# Return a non-empty list when the copy input is FILE
# Note: Sending an empty list of files will make server kill the session.
return file_list
def _send_copy_data(self, stream, buffer_size):
# Send zero or more CopyData messages, forming a stream of input data
while True:
chunk = stream.read(buffer_size)
if not chunk:
break
self.connection.write(messages.CopyData(chunk, self.unicode_error))
def _send_copy_file_data(self):
filename = self._message.filename
self._logger.info('Sending {} data to server'.format(filename))
if filename not in self.valid_read_file_path:
raise errors.MessageError('Server requests for loading invalid'
' file: {}'.format(filename))
with open(filename, "rb") as f:
self._send_copy_data(f, self.buffer_size)
self.connection.write(messages.EndOfBatchRequest())
def _read_copy_data_response(self, is_stdin_copy=False):
"""Return True if the server wants us to load more data, false if we are done"""
self._message = self.connection.read_expected_message(END_OF_BATCH_RESPONSES)
# Check for rejections during this load
while isinstance(self._message, messages.WriteFile):
if self._message.filename == '':
self._logger.info('COPY-LOCAL rejected row numbers: {}'.format(self._message.rejected_rows))
elif self._message.filename in self.valid_write_file_path:
self._message.write_to_disk(self.connection, self.buffer_size)
else:
raise errors.MessageError('Server requests for writing to'
' invalid file path: {}'.format(self._message.filename))
self._message = self.connection.read_expected_message(END_OF_BATCH_RESPONSES)
# For STDIN copy, there will be no incoming message until we send
# another EndOfBatchRequest or CopyDone
if is_stdin_copy:
self.connection.write(messages.CopyDone()) # End this copy
return False
# For file copy, peek the next message
self._message = self.connection.read_message()
if isinstance(self._message, messages.LoadFile):
# Indicate there are more local files to load
return True
elif not isinstance(self._message, messages.CopyDoneResponse):
            raise errors.MessageError('Unexpected COPY-LOCAL message: {0}'.format(self._message))
return False
def _error_handler(self, msg):
self.connection.write(messages.Sync())
raise errors.QueryError.from_error_response(msg, self.operation)
def _prepare(self, query):
"""
Send the query to be prepared to the server. The server will parse the
query and return some metadata.
"""
self._logger.info(u'Prepare a statement: [{}]'.format(query))
# Send Parse message to server
# We don't need to tell the server the parameter types yet
self.connection.write(messages.Parse(self.prepared_name, query, param_types=()))
# Send Describe message to server
self.connection.write(messages.Describe('prepared_statement', self.prepared_name))
self.connection.write(messages.Flush())
# Read expected message: ParseComplete
self._message = self.connection.read_expected_message(messages.ParseComplete, self._error_handler)
# Read expected message: ParameterDescription
self._message = self.connection.read_expected_message(messages.ParameterDescription, self._error_handler)
self._param_metadata = self._message.parameters
# Read expected message: RowDescription or NoData
self._message = self.connection.read_expected_message(
(messages.RowDescription, messages.NoData), self._error_handler)
if isinstance(self._message, messages.NoData):
self.description = None # response was NoData for a DDL/transaction PreparedStatement
else:
self.description = [Column(fd, self.unicode_error) for fd in self._message.fields]
# Read expected message: CommandDescription
self._message = self.connection.read_expected_message(messages.CommandDescription, self._error_handler)
if len(self._message.command_tag) == 0:
msg = 'The statement being prepared is empty'
self._logger.error(msg)
self.connection.write(messages.Sync())
raise errors.EmptyQueryError(msg)
self._logger.info('Finish preparing the statement')
def _execute_prepared_statement(self, list_of_parameter_values):
"""
Send multiple statement parameter sets to the server using the extended
query protocol. The server would bind and execute each set of parameter
values.
This function should not be called without first calling _prepare() to
prepare a statement.
"""
portal_name = ""
parameter_type_oids = [metadata['data_type_oid'] for metadata in self._param_metadata]
parameter_count = len(self._param_metadata)
try:
if len(list_of_parameter_values) == 0:
raise ValueError("Empty list/tuple, nothing to execute")
for parameter_values in list_of_parameter_values:
if parameter_values is None:
parameter_values = ()
self._logger.info(u'Bind parameters: {}'.format(parameter_values))
if len(parameter_values) != parameter_count:
msg = ("Invalid number of parameters for {}: {} given, {} expected"
.format(parameter_values, len(parameter_values), parameter_count))
raise ValueError(msg)
self.connection.write(messages.Bind(portal_name, self.prepared_name,
parameter_values, parameter_type_oids))
self.connection.write(messages.Execute(portal_name, 0))
self.connection.write(messages.Sync())
except Exception as e:
self._logger.error(str(e))
# the server will not send anything until we issue a sync
self.connection.write(messages.Sync())
self._message = self.connection.read_message()
raise
self.connection.write(messages.Flush())
# Read expected message: BindComplete
self.connection.read_expected_message(messages.BindComplete)
self._message = self.connection.read_message()
if isinstance(self._message, messages.ErrorResponse):
raise errors.QueryError.from_error_response(self._message, self.prepared_sql)
def _close_prepared_statement(self):
"""
Close the prepared statement on the server.
"""
self.prepared_sql = None
self.flush_to_query_ready()
self.connection.write(messages.Close('prepared_statement', self.prepared_name))
self.connection.write(messages.Flush())
self._message = self.connection.read_expected_message(messages.CloseComplete)
self.connection.write(messages.Sync())
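# ----------------------------------------------------------------------------
# Usage sketch for the Cursor above. This is an illustrative assumption, not
# part of this module: it presumes the cursor is obtained from a
# vertica_python-style connect(), and the host, credentials, table and file
# names below are placeholders.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import vertica_python
    conn_info = {'host': '127.0.0.1', 'port': 5433, 'user': 'dbadmin',
                 'password': '', 'database': 'VMart'}
    with vertica_python.connect(**conn_info) as connection:
        cur = connection.cursor()
        # Named parameters are substituted client-side by
        # format_operation_with_parameters()
        cur.execute("SELECT customer_name FROM customer_dimension "
                    "WHERE customer_key = :key", {'key': 15})
        for row in cur.iterate():
            print(row)
        # Bulk load through the COPY protocol implemented by Cursor.copy()
        with open('/tmp/file.csv', 'rb') as fs:
            cur.copy("COPY my_table(field1, field2) FROM STDIN DELIMITER ','",
                     fs, buffer_size=65536)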
|
|
import numpy as np
import pandas as pd
from zipline.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
        start_date : pd.Timestamp
            Beginning of the window range.
        end_date : pd.Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous future assets in the window.
        Returns
        -------
        list of np.ndarray
            A list with an entry per column of ndarrays with shape
            (sessions in range, assets) with a dtype of float64 (int64 for
            'volume' and 'sid'), containing the values for the respective
            column over the start and end date range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_date, end_date, asset.offset)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date))
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != 'volume' and column != 'sid':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
        continuous_future : ContinuousFuture
            The continuous future asset.
        dt : pd.Timestamp
            The timestamp for the desired data point.
        field : string
            The OHLCV field name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
        asset : zipline.assets.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
            All session labels (unioning the ranges of all assets) which the
reader can provide.
"""
return self._bar_reader.sessions
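# ----------------------------------------------------------------------------
# Standalone sketch (illustration only, not used by the readers) of the
# partitioning step performed in load_raw_arrays() above: each (sid, roll_date)
# pair is mapped to the contiguous block of sessions during which that contract
# is the active one. The helper name, dates and sids below are made up.
# ----------------------------------------------------------------------------
def _partition_rolls_sketch(sessions, rolls, start, end):
    """Return a list of (sid, start, end, start_loc, end_loc) tuples."""
    partitions = []
    for sid, roll_date in rolls:
        start_loc = sessions.get_loc(start)
        if roll_date is not None:
            # The outgoing contract is active up to, but not including, the
            # session on which the roll occurs.
            end_loc = sessions.get_loc(roll_date) - 1
            part_end = sessions[end_loc]
        else:
            part_end = end
            end_loc = len(sessions) - 1
        partitions.append((sid, start, part_end, start_loc, end_loc))
        if roll_date is not None:
            start = sessions[end_loc + 1]
    return partitions
# Example:
#     sessions = pd.date_range('2016-01-04', '2016-01-15', freq='B')
#     rolls = [(1001, pd.Timestamp('2016-01-11')), (1002, None)]
#     _partition_rolls_sketch(sessions, rolls, sessions[0], sessions[-1])
#     # -> [(1001, Jan 4, Jan 8, 0, 4), (1002, Jan 11, Jan 15, 5, 9)]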
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', or 'volume'
        start_date : pd.Timestamp
            Beginning of the window range.
        end_date : pd.Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous future assets in the window.
        Returns
        -------
        list of np.ndarray
            A list with an entry per column of ndarrays with shape
            (minutes in range, assets) with a dtype of float64 (uint32 for
            'volume'), containing the values for the respective column over
            the start and end date range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
        continuous_future : ContinuousFuture
            The continuous future asset.
        dt : pd.Timestamp
            The timestamp for the desired data point.
        field : string
            The OHLCV field name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
        asset : zipline.assets.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions
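# ----------------------------------------------------------------------------
# Wiring sketch (assumption for illustration; the underlying reader and roll
# finder objects are placeholders, not names imported by this module). The
# roll_finders dict must be keyed by the assets' roll_style values
# (e.g. 'calendar', 'volume'):
#
#     roll_finders = {'calendar': calendar_roll_finder,
#                     'volume': volume_roll_finder}
#     daily_reader = ContinuousFutureSessionBarReader(daily_bar_reader,
#                                                     roll_finders)
#     closes, volumes = daily_reader.load_raw_arrays(
#         ['close', 'volume'], start_session, end_session, [continuous_future])
# ----------------------------------------------------------------------------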
|
|
"""
Test model API
"""
import random
import json
import six
import copy
from datetime import datetime
from botocore.client import ClientError
from pynamodb.compat import CompatTestCase as TestCase
from pynamodb.compat import OrderedDict
from pynamodb.throttle import Throttle
from pynamodb.connection.util import pythonic
from pynamodb.exceptions import TableError
from pynamodb.types import RANGE
from pynamodb.constants import (
ITEM, STRING_SHORT, ALL, KEYS_ONLY, INCLUDE, REQUEST_ITEMS, UNPROCESSED_KEYS, ITEM_COUNT,
RESPONSES, KEYS, ITEMS, LAST_EVALUATED_KEY, EXCLUSIVE_START_KEY, ATTRIBUTES, BINARY_SHORT
)
from pynamodb.models import Model
from pynamodb.indexes import (
GlobalSecondaryIndex, LocalSecondaryIndex, AllProjection,
IncludeProjection, KeysOnlyProjection, Index
)
from pynamodb.attributes import (
UnicodeAttribute, NumberAttribute, BinaryAttribute, UTCDateTimeAttribute,
UnicodeSetAttribute, NumberSetAttribute, BinarySetAttribute)
from .data import (
MODEL_TABLE_DATA, GET_MODEL_ITEM_DATA, SIMPLE_MODEL_TABLE_DATA,
BATCH_GET_ITEMS, SIMPLE_BATCH_GET_ITEMS, COMPLEX_TABLE_DATA,
COMPLEX_ITEM_DATA, INDEX_TABLE_DATA, LOCAL_INDEX_TABLE_DATA,
CUSTOM_ATTR_NAME_INDEX_TABLE_DATA, CUSTOM_ATTR_NAME_ITEM_DATA,
BINARY_ATTR_DATA, SERIALIZED_TABLE_DATA
)
if six.PY3:
from unittest.mock import patch, MagicMock
else:
from mock import patch, MagicMock
PATCH_METHOD = 'botocore.client.BaseClient._make_api_call'
class GamePlayerOpponentIndex(LocalSecondaryIndex):
class Meta:
read_capacity_units = 1
write_capacity_units = 1
table_name = "GamePlayerOpponentIndex"
host = "http://localhost:8000"
projection = AllProjection()
player_id = UnicodeAttribute(hash_key=True)
winner_id = UnicodeAttribute(range_key=True)
class GameOpponentTimeIndex(GlobalSecondaryIndex):
class Meta:
read_capacity_units = 1
write_capacity_units = 1
table_name = "GameOpponentTimeIndex"
host = "http://localhost:8000"
projection = AllProjection()
winner_id = UnicodeAttribute(hash_key=True)
created_time = UnicodeAttribute(range_key=True)
class GameModel(Model):
class Meta:
read_capacity_units = 1
write_capacity_units = 1
table_name = "GameModel"
host = "http://localhost:8000"
player_id = UnicodeAttribute(hash_key=True)
created_time = UTCDateTimeAttribute(range_key=True)
winner_id = UnicodeAttribute()
loser_id = UnicodeAttribute(null=True)
player_opponent_index = GamePlayerOpponentIndex()
opponent_time_index = GameOpponentTimeIndex()
class OldStyleModel(Model):
_table_name = 'IndexedModel'
user_name = UnicodeAttribute(hash_key=True)
class EmailIndex(GlobalSecondaryIndex):
"""
A global secondary index for email addresses
"""
class Meta:
index_name = 'custom_idx_name'
read_capacity_units = 2
write_capacity_units = 1
projection = AllProjection()
email = UnicodeAttribute(hash_key=True)
alt_numbers = NumberSetAttribute(range_key=True, attr_name='numbers')
class LocalEmailIndex(LocalSecondaryIndex):
"""
    A local secondary index for email addresses
"""
class Meta:
read_capacity_units = 2
write_capacity_units = 1
projection = AllProjection()
email = UnicodeAttribute(hash_key=True)
numbers = NumberSetAttribute(range_key=True)
class NonKeyAttrIndex(LocalSecondaryIndex):
class Meta:
index_name = "non_key_idx"
read_capacity_units = 2
write_capacity_units = 1
projection = IncludeProjection(non_attr_keys=['numbers'])
email = UnicodeAttribute(hash_key=True)
numbers = NumberSetAttribute(range_key=True)
class IndexedModel(Model):
"""
A model with an index
"""
class Meta:
table_name = 'IndexedModel'
user_name = UnicodeAttribute(hash_key=True)
email = UnicodeAttribute()
email_index = EmailIndex()
include_index = NonKeyAttrIndex()
numbers = NumberSetAttribute()
aliases = UnicodeSetAttribute()
icons = BinarySetAttribute()
class LocalIndexedModel(Model):
"""
A model with an index
"""
class Meta:
table_name = 'LocalIndexedModel'
user_name = UnicodeAttribute(hash_key=True)
email = UnicodeAttribute()
email_index = LocalEmailIndex()
numbers = NumberSetAttribute()
aliases = UnicodeSetAttribute()
icons = BinarySetAttribute()
class SimpleUserModel(Model):
"""
A hash key only model
"""
class Meta:
table_name = 'SimpleModel'
user_name = UnicodeAttribute(hash_key=True)
email = UnicodeAttribute()
numbers = NumberSetAttribute()
custom_aliases = UnicodeSetAttribute(attr_name='aliases')
icons = BinarySetAttribute()
views = NumberAttribute(null=True)
class ThrottledUserModel(Model):
"""
A testing model
"""
class Meta:
table_name = 'UserModel'
user_name = UnicodeAttribute(hash_key=True)
user_id = UnicodeAttribute(range_key=True)
throttle = Throttle('50')
class CustomAttrIndex(LocalSecondaryIndex):
class Meta:
read_capacity_units = 2
write_capacity_units = 1
projection = AllProjection()
overidden_uid = UnicodeAttribute(hash_key=True, attr_name='user_id')
class CustomAttrNameModel(Model):
"""
A testing model
"""
class Meta:
table_name = 'CustomAttrModel'
overidden_user_name = UnicodeAttribute(hash_key=True, attr_name='user_name')
overidden_user_id = UnicodeAttribute(range_key=True, attr_name='user_id')
overidden_attr = UnicodeAttribute(attr_name='foo_attr', null=True)
uid_index = CustomAttrIndex()
class UserModel(Model):
"""
A testing model
"""
class Meta:
table_name = 'UserModel'
read_capacity_units = 25
write_capacity_units = 25
custom_user_name = UnicodeAttribute(hash_key=True, attr_name='user_name')
user_id = UnicodeAttribute(range_key=True)
picture = BinaryAttribute(null=True)
zip_code = NumberAttribute(null=True)
email = UnicodeAttribute(default='needs_email')
callable_field = NumberAttribute(default=lambda: 42)
class HostSpecificModel(Model):
"""
A testing model
"""
class Meta:
host = 'http://localhost'
table_name = 'RegionSpecificModel'
user_name = UnicodeAttribute(hash_key=True)
user_id = UnicodeAttribute(range_key=True)
class RegionSpecificModel(Model):
"""
A testing model
"""
class Meta:
region = 'us-west-1'
table_name = 'RegionSpecificModel'
user_name = UnicodeAttribute(hash_key=True)
user_id = UnicodeAttribute(range_key=True)
class ComplexKeyModel(Model):
"""
This model has a key that must be serialized/deserialized properly
"""
class Meta:
table_name = 'ComplexKey'
name = UnicodeAttribute(hash_key=True)
date_created = UTCDateTimeAttribute(default=datetime.utcnow)
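# ----------------------------------------------------------------------------
# Sketch of the mocking pattern used throughout ModelTestCase below (shown for
# reference only; it is not itself a test). Every DynamoDB request funnels
# through botocore's BaseClient._make_api_call, so patching PATCH_METHOD lets a
# test supply canned responses and then inspect the request parameters that
# PynamoDB built. It relies on the imports and UserModel defined above.
# ----------------------------------------------------------------------------
def _example_patched_call():
    with patch(PATCH_METHOD) as req:
        req.return_value = MODEL_TABLE_DATA   # canned DescribeTable response
        item = UserModel('foo', 'bar')        # hash and range key values
    with patch(PATCH_METHOD) as req:
        req.return_value = {}                 # canned PutItem response
        item.save()
        return req.call_args[0][1]            # the serialized request parameters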
class ModelTestCase(TestCase):
"""
Tests for the models API
"""
def assert_dict_lists_equal(self, list1, list2):
"""
Compares two lists of dictionaries
"""
for d1_item in list1:
found = False
for d2_item in list2:
if d2_item.items() == d1_item.items():
found = True
if not found:
if six.PY3:
                    # dict.items() comparison is order-sensitive on Python 2,
                    # so this check is only reliable on Python 3
raise AssertionError("Values not equal: {0} {1}".format(d1_item, list2))
if len(list1) != len(list2):
raise AssertionError("Values not equal: {0} {1}".format(list1, list2))
def test_create_model(self):
"""
Model.create_table
"""
self.maxDiff = None
scope_args = {'count': 0}
def fake_dynamodb(*args):
kwargs = args[1]
if kwargs == {'TableName': UserModel.Meta.table_name}:
if scope_args['count'] == 0:
return {}
else:
return MODEL_TABLE_DATA
else:
return {}
fake_db = MagicMock()
fake_db.side_effect = fake_dynamodb
with patch(PATCH_METHOD, new=fake_db):
with patch("pynamodb.connection.TableConnection.describe_table") as req:
req.return_value = None
with self.assertRaises(TableError):
UserModel.create_table(read_capacity_units=2, write_capacity_units=2, wait=True)
with patch(PATCH_METHOD, new=fake_db) as req:
UserModel.create_table(read_capacity_units=2, write_capacity_units=2)
# Test for default region
self.assertEqual(UserModel.Meta.region, 'us-east-1')
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
UserModel.create_table(read_capacity_units=2, write_capacity_units=2)
# The default region is us-east-1
self.assertEqual(UserModel._connection.connection.region, 'us-east-1')
# A table with a specified region
self.assertEqual(RegionSpecificModel.Meta.region, 'us-west-1')
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
RegionSpecificModel.create_table(read_capacity_units=2, write_capacity_units=2)
self.assertEqual(RegionSpecificModel._connection.connection.region, 'us-west-1')
# A table with a specified host
self.assertEqual(HostSpecificModel.Meta.host, 'http://localhost')
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
HostSpecificModel.create_table(read_capacity_units=2, write_capacity_units=2)
self.assertEqual(HostSpecificModel._connection.connection.host, 'http://localhost')
# A table with a specified capacity
self.assertEqual(UserModel.Meta.read_capacity_units, 25)
self.assertEqual(UserModel.Meta.write_capacity_units, 25)
UserModel._connection = None
def fake_wait(*obj, **kwargs):
if scope_args['count'] == 0:
scope_args['count'] += 1
raise ClientError({'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Not Found'}},
"DescribeTable")
elif scope_args['count'] == 1 or scope_args['count'] == 2:
data = copy.deepcopy(MODEL_TABLE_DATA)
data['Table']['TableStatus'] = 'Creating'
scope_args['count'] += 1
return data
else:
return MODEL_TABLE_DATA
mock_wait = MagicMock()
mock_wait.side_effect = fake_wait
scope_args = {'count': 0}
with patch(PATCH_METHOD, new=mock_wait) as req:
UserModel.create_table(wait=True)
params = {
'AttributeDefinitions': [
{
'AttributeName': 'user_name',
'AttributeType': 'S'
},
{
'AttributeName': 'user_id',
'AttributeType': 'S'
}
],
'KeySchema': [
{
'AttributeName': 'user_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'user_id',
'KeyType': 'RANGE'
}
],
'ProvisionedThroughput': {
'ReadCapacityUnits': 25, 'WriteCapacityUnits': 25
},
'TableName': 'UserModel'
}
self.assertEqual(req.call_args_list[1][0][1], params)
def bad_server(*args):
if scope_args['count'] == 0:
scope_args['count'] += 1
return {}
elif scope_args['count'] == 1 or scope_args['count'] == 2:
return {}
bad_mock_server = MagicMock()
bad_mock_server.side_effect = bad_server
scope_args = {'count': 0}
with patch(PATCH_METHOD, new=bad_mock_server) as req:
self.assertRaises(
TableError,
UserModel.create_table,
read_capacity_units=2,
write_capacity_units=2,
wait=True
)
def test_model_attrs(self):
"""
Model()
"""
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
item = UserModel('foo', 'bar')
self.assertEqual(item.email, 'needs_email')
self.assertEqual(item.callable_field, 42)
self.assertEqual(
repr(item), '{0}<{1}, {2}>'.format(UserModel.Meta.table_name, item.custom_user_name, item.user_id)
)
self.assertEqual(repr(UserModel._get_meta_data()), 'MetaTable<{0}>'.format('Thread'))
with patch(PATCH_METHOD) as req:
req.return_value = SIMPLE_MODEL_TABLE_DATA
item = SimpleUserModel('foo')
self.assertEqual(repr(item), '{0}<{1}>'.format(SimpleUserModel.Meta.table_name, item.user_name))
self.assertRaises(ValueError, item.save)
self.assertRaises(ValueError, UserModel.from_raw_data, None)
with patch(PATCH_METHOD) as req:
req.return_value = CUSTOM_ATTR_NAME_INDEX_TABLE_DATA
item = CustomAttrNameModel('foo', 'bar', overidden_attr='test')
self.assertEqual(item.overidden_attr, 'test')
self.assertTrue(not hasattr(item, 'foo_attr'))
def test_overidden_defaults(self):
"""
Custom attribute names
"""
schema = CustomAttrNameModel._get_schema()
correct_schema = {
'KeySchema': [
{'key_type': 'HASH', 'attribute_name': 'user_name'},
{'key_type': 'RANGE', 'attribute_name': 'user_id'}
],
'AttributeDefinitions': [
{'attribute_type': 'S', 'attribute_name': 'user_name'},
{'attribute_type': 'S', 'attribute_name': 'user_id'}
]
}
self.assert_dict_lists_equal(correct_schema['KeySchema'], schema['key_schema'])
self.assert_dict_lists_equal(correct_schema['AttributeDefinitions'], schema['attribute_definitions'])
def test_refresh(self):
"""
Model.refresh
"""
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
item = UserModel('foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = {}
self.assertRaises(item.DoesNotExist, item.refresh)
with patch(PATCH_METHOD) as req:
req.return_value = GET_MODEL_ITEM_DATA
item.refresh()
self.assertEqual(
item.user_name,
GET_MODEL_ITEM_DATA.get(ITEM).get('user_name').get(STRING_SHORT))
def test_complex_key(self):
"""
Model with complex key
"""
with patch(PATCH_METHOD) as req:
req.return_value = COMPLEX_TABLE_DATA
item = ComplexKeyModel('test')
with patch(PATCH_METHOD) as req:
req.return_value = COMPLEX_ITEM_DATA
item.refresh()
def test_delete(self):
"""
Model.delete
"""
UserModel._meta_table = None
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
item = UserModel('foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = None
item.delete()
params = {
'Key': {
'user_id': {
'S': 'bar'
},
'user_name': {
'S': 'foo'
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
args = req.call_args[0][1]
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = None
item.delete(user_id='bar')
params = {
'Key': {
'user_id': {
'S': 'bar'
},
'user_name': {
'S': 'foo'
}
},
'Expected': {
'user_id': {
'Value': {'S': 'bar'},
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
args = req.call_args[0][1]
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = None
item.delete(user_id='bar')
params = {
'Key': {
'user_id': {
'S': 'bar'
},
'user_name': {
'S': 'foo'
}
},
'Expected': {
'user_id': {
'Value': {'S': 'bar'},
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
args = req.call_args[0][1]
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = None
item.delete(user_id='bar', email__contains='@', conditional_operator='AND')
params = {
'Key': {
'user_id': {
'S': 'bar'
},
'user_name': {
'S': 'foo'
}
},
'Expected': {
'email': {
'AttributeValueList': [
{'S': '@'}
],
'ComparisonOperator': 'CONTAINS'
},
'user_id': {
'Value': {
'S': 'bar'
}
}
},
'ConditionalOperator': 'AND',
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
args = req.call_args[0][1]
self.assertEqual(args, params)
def test_update_item(self):
"""
Model.update_item
"""
with patch(PATCH_METHOD) as req:
req.return_value = SIMPLE_MODEL_TABLE_DATA
item = SimpleUserModel('foo', email='bar')
with patch(PATCH_METHOD) as req:
req.return_value = {}
item.save()
with patch(PATCH_METHOD) as req:
req.return_value = {
ATTRIBUTES: {
"views": {
"N": "10"
}
}
}
self.assertRaises(ValueError, item.update_item, 'views', 10)
with patch(PATCH_METHOD) as req:
req.return_value = {
ATTRIBUTES: {
"views": {
"N": "10"
}
}
}
item.update_item('views', 10, action='add')
args = req.call_args[0][1]
params = {
'TableName': 'SimpleModel',
'ReturnValues': 'ALL_NEW',
'Key': {
'user_name': {
'S': 'foo'
}
},
'AttributeUpdates': {
'views': {
'Action': 'ADD',
'Value': {
'N': '10'
}
}
},
'ReturnConsumedCapacity': 'TOTAL'
}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {
ATTRIBUTES: {
"views": {
"N": "10"
}
}
}
item.update_item('views', 10, action='add', user_name='foo', email__not_contains='@')
args = req.call_args[0][1]
params = {
'TableName': 'SimpleModel',
'ReturnValues': 'ALL_NEW',
'Key': {
'user_name': {
'S': 'foo'
}
},
'Expected': {
'user_name': {
'Value': {'S': 'foo'}
},
'email': {
'AttributeValueList': [
{'S': '@'}
],
'ComparisonOperator': 'NOT_CONTAINS'
},
},
'AttributeUpdates': {
'views': {
'Action': 'ADD',
'Value': {
'N': '10'
}
}
},
'ReturnConsumedCapacity': 'TOTAL'
}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {
ATTRIBUTES: {
"views": {
"N": "10"
}
}
}
item.update_item('views', 10, action='add', user_name__exists=False)
args = req.call_args[0][1]
params = {
'TableName': 'SimpleModel',
'ReturnValues': 'ALL_NEW',
'Key': {
'user_name': {
'S': 'foo'
}
},
'Expected': {
'user_name': {'Exists': False}
},
'AttributeUpdates': {
'views': {
'Action': 'ADD',
'Value': {
'N': '10'
}
}
},
'ReturnConsumedCapacity': 'TOTAL'
}
self.assertEqual(args, params)
# Reproduces https://github.com/jlafon/PynamoDB/issues/59
with patch(PATCH_METHOD) as req:
user = UserModel("test_hash", "test_range")
req.return_value = {
ATTRIBUTES: {}
}
user.update_item('zip_code', 10, action='add')
args = req.call_args[0][1]
params = {
'AttributeUpdates': {
'zip_code': {'Action': 'ADD', 'Value': {'N': '10'}}
},
'TableName': 'UserModel',
'ReturnValues': 'ALL_NEW',
'Key': {
'user_id': {'S': u'test_range'},
'user_name': {'S': u'test_hash'}
},
'ReturnConsumedCapacity': 'TOTAL'}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {
ATTRIBUTES: {
"views": {
"N": "10"
}
}
}
# Reproduces https://github.com/jlafon/PynamoDB/issues/34
item.email = None
item.update_item('views', 10, action='add')
args = req.call_args[0][1]
params = {
'TableName': 'SimpleModel',
'ReturnValues': 'ALL_NEW',
'Key': {
'user_name': {
'S': 'foo'
}
},
'AttributeUpdates': {
'views': {
'Action': 'ADD',
'Value': {
'N': '10'
}
}
},
'ReturnConsumedCapacity': 'TOTAL'
}
self.assertEqual(args, params)
def test_save(self):
"""
Model.save
"""
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
item = UserModel('foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = {}
item.save()
args = req.call_args[0][1]
params = {
'Item': {
'callable_field': {
'N': '42'
},
'email': {
'S': u'needs_email'
},
'user_id': {
'S': u'bar'
},
'user_name': {
'S': u'foo'
},
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {}
item.save(email__exists=False)
args = req.call_args[0][1]
params = {
'Item': {
'callable_field': {
'N': '42'
},
'email': {
'S': u'needs_email'
},
'user_id': {
'S': u'bar'
},
'user_name': {
'S': u'foo'
},
},
'Expected': {
'email': {
'Exists': False
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {}
item.save(email__exists=False, zip_code__null=False)
args = req.call_args[0][1]
params = {
'Item': {
'callable_field': {
'N': '42'
},
'email': {
'S': u'needs_email'
},
'user_id': {
'S': u'bar'
},
'user_name': {
'S': u'foo'
},
},
'Expected': {
'email': {
'Exists': False
},
'zip_code': {
'ComparisonOperator': 'NOT_NULL'
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {}
item.save(user_name='bar', zip_code__null=True, email__contains='@', conditional_operator='OR')
args = req.call_args[0][1]
params = {
'Item': {
'callable_field': {
'N': '42'
},
'email': {
'S': u'needs_email'
},
'user_id': {
'S': u'bar'
},
'user_name': {
'S': u'foo'
},
},
'ConditionalOperator': 'OR',
'Expected': {
'user_name': {
'Value': {'S': 'bar'}
},
'zip_code': {
'ComparisonOperator': 'NULL'
},
'email': {
'ComparisonOperator': 'CONTAINS',
'AttributeValueList': [
{'S': '@'}
]
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(args, params)
with patch(PATCH_METHOD) as req:
req.return_value = {}
item.save(custom_user_name='foo')
args = req.call_args[0][1]
params = {
'Item': {
'callable_field': {
'N': '42'
},
'email': {
'S': u'needs_email'
},
'user_id': {
'S': u'bar'
},
'user_name': {
'S': u'foo'
},
},
'Expected': {
'user_name': {
'Value': {'S': 'foo'}
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(args, params)
def test_filter_count(self):
"""
Model.count(**filters)
"""
with patch(PATCH_METHOD) as req:
req.return_value = {'Count': 10}
res = UserModel.count('foo')
self.assertEqual(res, 10)
args = req.call_args[0][1]
params = {
'KeyConditions': {
'user_name': {
'ComparisonOperator': 'EQ',
'AttributeValueList': [{'S': u'foo'}]
}
},
'TableName': 'UserModel',
'ReturnConsumedCapacity': 'TOTAL',
'Select': 'COUNT'
}
self.assertEqual(args, params)
def test_count(self):
"""
Model.count()
"""
def fake_dynamodb(*args, **kwargs):
return MODEL_TABLE_DATA
fake_db = MagicMock()
fake_db.side_effect = fake_dynamodb
with patch(PATCH_METHOD, new=fake_db) as req:
res = UserModel.count()
self.assertEqual(res, 42)
args = req.call_args[0][1]
params = {'TableName': 'UserModel'}
self.assertEqual(args, params)
def test_index_count(self):
"""
Model.index.count()
"""
with patch(PATCH_METHOD) as req:
req.return_value = {'Count': 42}
res = CustomAttrNameModel.uid_index.count('foo', limit=2, user_name__begins_with='bar')
self.assertEqual(res, 42)
args = req.call_args[0][1]
params = {
'KeyConditions': {
'user_name': {
'ComparisonOperator': 'BEGINS_WITH',
'AttributeValueList': [{'S': u'bar'}]
},
'user_id': {
'ComparisonOperator': 'EQ',
'AttributeValueList': [{'S': u'foo'}]
}
},
'Limit': 2,
'IndexName': 'uid_index',
'TableName': 'CustomAttrModel',
'ReturnConsumedCapacity': 'TOTAL',
'Select': 'COUNT'
}
self.assertEqual(args, params)
def test_query(self):
"""
Model.query
"""
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
UserModel('foo', 'bar')
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__between=['id-1', 'id-3']):
queried.append(item._serialize().get(RANGE))
self.assertListEqual(
[item.get('user_id').get(STRING_SHORT) for item in items],
queried
)
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__gt='id-1', user_id__le='id-2'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__lt='id-1'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__ge='id-1'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__le='id-1'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__eq='id-1'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo', user_id__begins_with='id'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query('foo'):
queried.append(item._serialize())
self.assertTrue(len(queried) == len(items))
def fake_query(*args):
kwargs = args[1]
start_key = kwargs.get(EXCLUSIVE_START_KEY, None)
if start_key:
item_idx = 0
for query_item in BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name):
item_idx += 1
if query_item == start_key:
break
query_items = BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name)[item_idx:item_idx + 1]
else:
query_items = BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name)[:1]
data = {
ITEMS: query_items,
LAST_EVALUATED_KEY: query_items[-1] if len(query_items) else None
}
return data
mock_query = MagicMock()
mock_query.side_effect = fake_query
with patch(PATCH_METHOD, new=mock_query) as req:
for item in UserModel.query('foo'):
self.assertIsNotNone(item)
with patch(PATCH_METHOD) as req:
req.return_value = CUSTOM_ATTR_NAME_INDEX_TABLE_DATA
CustomAttrNameModel._get_meta_data()
with patch(PATCH_METHOD) as req:
req.return_value = {ITEMS: [CUSTOM_ATTR_NAME_ITEM_DATA.get(ITEM)]}
for item in CustomAttrNameModel.query('bar', overidden_user_name__eq='foo'):
self.assertIsNotNone(item)
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query(
'foo',
user_id__begins_with='id',
email__contains='@',
picture__null=False,
zip_code__between=[2, 3]):
queried.append(item._serialize())
params = {
'KeyConditions': {
'user_id': {
'AttributeValueList': [
{'S': 'id'}
],
'ComparisonOperator': 'BEGINS_WITH'
},
'user_name': {
'AttributeValueList': [
{'S': 'foo'}
],
'ComparisonOperator': 'EQ'
}
},
'QueryFilter': {
'email': {
'AttributeValueList': [
{'S': '@'}
],
'ComparisonOperator': 'CONTAINS'
},
'zip_code': {
'ComparisonOperator': 'BETWEEN',
'AttributeValueList': [
{'N': '2'},
{'N': '3'}
]
},
'picture': {
'ComparisonOperator': 'NOT_NULL'
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(params, req.call_args[0][1])
self.assertTrue(len(queried) == len(items))
def test_scan_limit(self):
"""
Model.scan(limit)
"""
def fake_scan(*args):
scan_items = BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name)
data = {
ITEM_COUNT: len(scan_items),
ITEMS: scan_items,
}
return data
mock_scan = MagicMock()
mock_scan.side_effect = fake_scan
with patch(PATCH_METHOD, new=mock_scan) as req:
count = 0
for item in UserModel.scan(limit=4):
count += 1
self.assertIsNotNone(item)
self.assertEqual(count, 4)
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in UserModel.query(
'foo',
user_id__begins_with='id',
email__contains='@',
picture__null=False,
zip_code__ge=2,
conditional_operator='AND'):
queried.append(item._serialize())
params = {
'KeyConditions': {
'user_id': {
'AttributeValueList': [
{'S': 'id'}
],
'ComparisonOperator': 'BEGINS_WITH'
},
'user_name': {
'AttributeValueList': [
{'S': 'foo'}
],
'ComparisonOperator': 'EQ'
}
},
'query_filter': {
'email': {
'AttributeValueList': [
{'S': '@'}
],
'ComparisonOperator': 'CONTAINS'
},
'zip_code': {
'ComparisonOperator': 'GE',
'AttributeValueList': [
{'N': '2'},
]
},
'picture': {
'ComparisonOperator': 'NOT_NULL'
}
},
'ConditionalOperator': 'AND',
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
for key in ('ConditionalOperator', 'ReturnConsumedCapacity', 'TableName'):
self.assertEqual(req.call_args[0][1][key], params[key])
for key in ('user_id', 'user_name'):
self.assertEqual(
req.call_args[0][1]['KeyConditions'][key],
params['KeyConditions'][key]
)
for key in ('email', 'zip_code', 'picture'):
self.assertEqual(
sorted(req.call_args[0][1]['QueryFilter'][key].items(), key=lambda x: x[0]),
sorted(params['query_filter'][key].items(), key=lambda x: x[0]),
)
self.assertTrue(len(queried) == len(items))
def test_scan(self):
"""
Model.scan
"""
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
scanned_items = []
for item in UserModel.scan():
scanned_items.append(item._serialize().get(RANGE))
self.assertListEqual(
[item.get('user_id').get(STRING_SHORT) for item in items],
scanned_items
)
def fake_scan(*args):
kwargs = args[1]
start_key = kwargs.get(EXCLUSIVE_START_KEY, None)
if start_key:
item_idx = 0
for scan_item in BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name):
item_idx += 1
if scan_item == start_key:
break
scan_items = BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name)[item_idx:item_idx + 1]
else:
scan_items = BATCH_GET_ITEMS.get(RESPONSES).get(UserModel.Meta.table_name)[:1]
data = {
ITEMS: scan_items,
LAST_EVALUATED_KEY: scan_items[-1] if len(scan_items) else None
}
return data
mock_scan = MagicMock()
mock_scan.side_effect = fake_scan
with patch(PATCH_METHOD, new=mock_scan) as req:
for item in UserModel.scan():
self.assertIsNotNone(item)
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
for item in UserModel.scan(user_id__contains='tux', zip_code__null=False, email__null=True):
self.assertIsNotNone(item)
params = {
'ReturnConsumedCapacity': 'TOTAL',
'ScanFilter': {
'user_id': {
'AttributeValueList': [
{'S': 'tux'}
],
'ComparisonOperator': 'CONTAINS'
},
'zip_code': {
'ComparisonOperator': 'NOT_NULL'
},
'email': {
'ComparisonOperator': 'NULL'
}
},
'TableName': 'UserModel'
}
            self.assertEqual(params, req.call_args[0][1])
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
for item in UserModel.scan(
user_id__contains='tux',
zip_code__null=False,
conditional_operator='OR',
email__null=True):
self.assertIsNotNone(item)
params = {
'ReturnConsumedCapacity': 'TOTAL',
'ScanFilter': {
'user_id': {
'AttributeValueList': [
{'S': 'tux'}
],
'ComparisonOperator': 'CONTAINS'
},
'zip_code': {
'ComparisonOperator': 'NOT_NULL'
},
'email': {
'ComparisonOperator': 'NULL'
},
},
'ConditionalOperator': 'OR',
'TableName': 'UserModel'
}
            self.assertEqual(params, req.call_args[0][1])
def test_get(self):
"""
Model.get
"""
def fake_dynamodb(*args):
kwargs = args[1]
if kwargs == {'TableName': UserModel.Meta.table_name}:
return MODEL_TABLE_DATA
elif kwargs == {
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel',
'Key': {
'user_name': {'S': 'foo'},
'user_id': {'S': 'bar'}
},
'ConsistentRead': False}:
return GET_MODEL_ITEM_DATA
return MODEL_TABLE_DATA
fake_db = MagicMock()
fake_db.side_effect = fake_dynamodb
with patch(PATCH_METHOD, new=fake_db) as req:
item = UserModel.get(
'foo',
'bar'
)
self.assertEqual(item._get_keys(), {'user_id': 'bar', 'user_name': 'foo'})
params = {
'ConsistentRead': False,
'Key': {
'user_id': {
'S': 'bar'
},
'user_name': {
'S': 'foo'
}
},
'ReturnConsumedCapacity': 'TOTAL',
'TableName': 'UserModel'
}
self.assertEqual(req.call_args[0][1], params)
item.zip_code = 88030
self.assertEqual(item.zip_code, 88030)
with patch(PATCH_METHOD) as req:
req.return_value = {}
self.assertRaises(UserModel.DoesNotExist, UserModel.get, 'foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = CUSTOM_ATTR_NAME_INDEX_TABLE_DATA
CustomAttrNameModel._get_meta_data()
with patch(PATCH_METHOD) as req:
req.return_value = {"ConsumedCapacity": {"CapacityUnits": 0.5, "TableName": "UserModel"}}
self.assertRaises(CustomAttrNameModel.DoesNotExist, CustomAttrNameModel.get, 'foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = {}
self.assertRaises(CustomAttrNameModel.DoesNotExist, CustomAttrNameModel.get, 'foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = CUSTOM_ATTR_NAME_ITEM_DATA
item = CustomAttrNameModel.get('foo', 'bar')
self.assertEqual(item.overidden_attr, CUSTOM_ATTR_NAME_ITEM_DATA['Item']['foo_attr']['S'])
self.assertEqual(item.overidden_user_name, CUSTOM_ATTR_NAME_ITEM_DATA['Item']['user_name']['S'])
self.assertEqual(item.overidden_user_id, CUSTOM_ATTR_NAME_ITEM_DATA['Item']['user_id']['S'])
def test_batch_get(self):
"""
Model.batch_get
"""
with patch(PATCH_METHOD) as req:
req.return_value = SIMPLE_MODEL_TABLE_DATA
SimpleUserModel('foo')
with patch(PATCH_METHOD) as req:
req.return_value = SIMPLE_BATCH_GET_ITEMS
item_keys = ['hash-{0}'.format(x) for x in range(10)]
for item in SimpleUserModel.batch_get(item_keys):
self.assertIsNotNone(item)
params = {
'ReturnConsumedCapacity': 'TOTAL',
'RequestItems': {
'SimpleModel': {
'Keys': [
{'user_name': {'S': 'hash-9'}},
{'user_name': {'S': 'hash-8'}},
{'user_name': {'S': 'hash-7'}},
{'user_name': {'S': 'hash-6'}},
{'user_name': {'S': 'hash-5'}},
{'user_name': {'S': 'hash-4'}},
{'user_name': {'S': 'hash-3'}},
{'user_name': {'S': 'hash-2'}},
{'user_name': {'S': 'hash-1'}},
{'user_name': {'S': 'hash-0'}}
]
}
}
}
self.assertEqual(params, req.call_args[0][1])
with patch(PATCH_METHOD) as req:
req.return_value = MODEL_TABLE_DATA
UserModel('foo', 'bar')
with patch(PATCH_METHOD) as req:
item_keys = [('hash-{0}'.format(x), '{0}'.format(x)) for x in range(10)]
item_keys_copy = list(item_keys)
req.return_value = BATCH_GET_ITEMS
for item in UserModel.batch_get(item_keys):
self.assertIsNotNone(item)
self.assertEqual(item_keys, item_keys_copy)
params = {
'RequestItems': {
'UserModel': {
'Keys': [
{'user_name': {'S': 'hash-0'}, 'user_id': {'S': '0'}},
{'user_name': {'S': 'hash-1'}, 'user_id': {'S': '1'}},
{'user_name': {'S': 'hash-2'}, 'user_id': {'S': '2'}},
{'user_name': {'S': 'hash-3'}, 'user_id': {'S': '3'}},
{'user_name': {'S': 'hash-4'}, 'user_id': {'S': '4'}},
{'user_name': {'S': 'hash-5'}, 'user_id': {'S': '5'}},
{'user_name': {'S': 'hash-6'}, 'user_id': {'S': '6'}},
{'user_name': {'S': 'hash-7'}, 'user_id': {'S': '7'}},
{'user_name': {'S': 'hash-8'}, 'user_id': {'S': '8'}},
{'user_name': {'S': 'hash-9'}, 'user_id': {'S': '9'}}
]
}
}
}
args = req.call_args[0][1]
self.assertTrue('RequestItems' in params)
self.assertTrue('UserModel' in params['RequestItems'])
self.assertTrue('Keys' in params['RequestItems']['UserModel'])
self.assert_dict_lists_equal(
params['RequestItems']['UserModel']['Keys'],
args['RequestItems']['UserModel']['Keys'],
)
def fake_batch_get(*batch_args):
kwargs = batch_args[1]
if REQUEST_ITEMS in kwargs:
batch_item = kwargs.get(REQUEST_ITEMS).get(UserModel.Meta.table_name).get(KEYS)[0]
batch_items = kwargs.get(REQUEST_ITEMS).get(UserModel.Meta.table_name).get(KEYS)[1:]
response = {
UNPROCESSED_KEYS: {
UserModel.Meta.table_name: {
KEYS: batch_items
}
},
RESPONSES: {
UserModel.Meta.table_name: [batch_item]
}
}
return response
return {}
batch_get_mock = MagicMock()
batch_get_mock.side_effect = fake_batch_get
with patch(PATCH_METHOD, new=batch_get_mock) as req:
item_keys = [('hash-{0}'.format(x), '{0}'.format(x)) for x in range(200)]
for item in UserModel.batch_get(item_keys):
self.assertIsNotNone(item)
def test_batch_write(self):
"""
Model.batch_write
"""
with patch(PATCH_METHOD) as req:
req.return_value = {}
with UserModel.batch_write(auto_commit=False) as batch:
pass
with UserModel.batch_write() as batch:
self.assertIsNone(batch.commit())
with self.assertRaises(ValueError):
with UserModel.batch_write(auto_commit=False) as batch:
items = [UserModel('hash-{0}'.format(x), '{0}'.format(x)) for x in range(26)]
for item in items:
batch.delete(item)
self.assertRaises(ValueError, batch.save, UserModel('asdf', '1234'))
with UserModel.batch_write(auto_commit=False) as batch:
items = [UserModel('hash-{0}'.format(x), '{0}'.format(x)) for x in range(25)]
for item in items:
batch.delete(item)
self.assertRaises(ValueError, batch.save, UserModel('asdf', '1234'))
with UserModel.batch_write(auto_commit=False) as batch:
items = [UserModel('hash-{0}'.format(x), '{0}'.format(x)) for x in range(25)]
for item in items:
batch.save(item)
self.assertRaises(ValueError, batch.save, UserModel('asdf', '1234'))
with UserModel.batch_write() as batch:
items = [UserModel('hash-{0}'.format(x), '{0}'.format(x)) for x in range(30)]
for item in items:
batch.delete(item)
with UserModel.batch_write() as batch:
items = [UserModel('hash-{0}'.format(x), '{0}'.format(x)) for x in range(30)]
for item in items:
batch.save(item)
def fake_unprocessed_keys(*args, **kwargs):
if pythonic(REQUEST_ITEMS) in kwargs:
batch_items = kwargs.get(pythonic(REQUEST_ITEMS)).get(UserModel.Meta.table_name)[1:]
unprocessed = {
UNPROCESSED_KEYS: {
UserModel.Meta.table_name: batch_items
}
}
return unprocessed
return {}
batch_write_mock = MagicMock()
batch_write_mock.side_effect = fake_unprocessed_keys
with patch(PATCH_METHOD, new=batch_write_mock) as req:
items = [UserModel('hash-{0}'.format(x), '{0}'.format(x)) for x in range(500)]
for item in items:
batch.save(item)
def test_index_queries(self):
"""
Model.Index.Query
"""
with patch(PATCH_METHOD) as req:
req.return_value = CUSTOM_ATTR_NAME_INDEX_TABLE_DATA
CustomAttrNameModel._get_meta_data()
with patch(PATCH_METHOD) as req:
req.return_value = INDEX_TABLE_DATA
IndexedModel._get_connection().describe_table()
with patch(PATCH_METHOD) as req:
req.return_value = LOCAL_INDEX_TABLE_DATA
LocalIndexedModel._get_meta_data()
self.assertEqual(IndexedModel.include_index.Meta.index_name, "non_key_idx")
queried = []
with patch(PATCH_METHOD) as req:
with self.assertRaises(ValueError):
for item in IndexedModel.email_index.query('foo', user_id__between=['id-1', 'id-3']):
queried.append(item._serialize().get(RANGE))
with patch(PATCH_METHOD) as req:
with self.assertRaises(ValueError):
for item in IndexedModel.email_index.query('foo', user_name__startswith='foo'):
queried.append(item._serialize().get(RANGE))
with patch(PATCH_METHOD) as req:
with self.assertRaises(ValueError):
for item in IndexedModel.email_index.query('foo', name='foo'):
queried.append(item._serialize().get(RANGE))
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_name'] = {STRING_SHORT: 'id-{0}'.format(idx)}
item['email'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in IndexedModel.email_index.query('foo', limit=2, user_name__begins_with='bar'):
queried.append(item._serialize())
params = {
'KeyConditions': {
'user_name': {
'ComparisonOperator': 'BEGINS_WITH',
'AttributeValueList': [
{
'S': u'bar'
}
]
},
'email': {
'ComparisonOperator': 'EQ',
'AttributeValueList': [
{
'S': u'foo'
}
]
}
},
'IndexName': 'custom_idx_name',
'TableName': 'IndexedModel',
'ReturnConsumedCapacity': 'TOTAL',
'Limit': 2
}
self.assertEqual(req.call_args[0][1], params)
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_name'] = {STRING_SHORT: 'id-{0}'.format(idx)}
item['email'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in LocalIndexedModel.email_index.query(
'foo',
limit=1,
user_name__begins_with='bar',
aliases__contains=1):
queried.append(item._serialize())
params = {
'KeyConditions': {
'user_name': {
'ComparisonOperator': 'BEGINS_WITH',
'AttributeValueList': [
{
'S': u'bar'
}
]
},
'email': {
'ComparisonOperator': 'EQ',
'AttributeValueList': [
{
'S': u'foo'
}
]
}
},
'QueryFilter': {
'aliases': {
'AttributeValueList': [
{
'SS': ['1']
}
],
'ComparisonOperator': 'CONTAINS'
}
},
'IndexName': 'email_index',
'TableName': 'LocalIndexedModel',
'ReturnConsumedCapacity': 'TOTAL',
'Limit': 1
}
self.assertEqual(req.call_args[0][1], params)
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_name'] = {STRING_SHORT: 'id-{0}'.format(idx)}
items.append(item)
req.return_value = {'Items': items}
queried = []
for item in CustomAttrNameModel.uid_index.query('foo', limit=2, user_name__begins_with='bar'):
queried.append(item._serialize())
params = {
'KeyConditions': {
'user_name': {
'ComparisonOperator': 'BEGINS_WITH',
'AttributeValueList': [
{
'S': u'bar'
}
]
},
'user_id': {
'ComparisonOperator': 'EQ',
'AttributeValueList': [
{
'S': u'foo'
}
]
}
},
'IndexName': 'uid_index',
'TableName': 'CustomAttrModel',
'ReturnConsumedCapacity': 'TOTAL',
'Limit': 2
}
self.assertEqual(req.call_args[0][1], params)
def test_multiple_indices_share_non_key_attribute(self):
"""
Models.Model
"""
scope_args = {'count': 0}
def fake_dynamodb(*args, **kwargs):
if scope_args['count'] == 0:
scope_args['count'] += 1
raise ClientError({'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Not Found'}},
"DescribeTable")
return {}
fake_db = MagicMock()
fake_db.side_effect = fake_dynamodb
with patch(PATCH_METHOD, new=fake_db) as req:
IndexedModel.create_table(read_capacity_units=2, write_capacity_units=2)
params = {
'AttributeDefinitions': [
{'AttributeName': 'email', 'AttributeType': 'S'},
{'AttributeName': 'numbers', 'AttributeType': 'NS'},
{'AttributeName': 'user_name', 'AttributeType': 'S'}
]
}
args = req.call_args[0][1]
self.assert_dict_lists_equal(args['AttributeDefinitions'], params['AttributeDefinitions'])
scope_args['count'] = 0
with patch(PATCH_METHOD, new=fake_db) as req:
GameModel.create_table()
params = {
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'player_id'},
{'KeyType': 'RANGE', 'AttributeName': 'created_time'}
],
'LocalSecondaryIndexes': [
{
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'player_id'},
{'KeyType': 'RANGE', 'AttributeName': 'winner_id'}
],
'IndexName': 'player_opponent_index',
'Projection': {'ProjectionType': 'ALL'}
}
],
'TableName': 'GameModel',
'ProvisionedThroughput': {'WriteCapacityUnits': 1, 'ReadCapacityUnits': 1},
'GlobalSecondaryIndexes': [
{
'ProvisionedThroughput': {'WriteCapacityUnits': 1, 'ReadCapacityUnits': 1},
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'winner_id'},
{'KeyType': 'RANGE', 'AttributeName': 'created_time'}
],
'IndexName': 'opponent_time_index',
'Projection': {'ProjectionType': 'ALL'}
}
],
'AttributeDefinitions': [
{'AttributeName': 'created_time', 'AttributeType': 'S'},
{'AttributeName': 'player_id', 'AttributeType': 'S'},
{'AttributeName': 'winner_id', 'AttributeType': 'S'}
]
}
args = req.call_args[0][1]
for key in ['KeySchema', 'AttributeDefinitions', 'LocalSecondaryIndexes', 'GlobalSecondaryIndexes']:
self.assert_dict_lists_equal(args[key], params[key])
def test_global_index(self):
"""
Models.GlobalSecondaryIndex
"""
self.assertIsNotNone(IndexedModel.email_index._hash_key_attribute())
self.assertEqual(IndexedModel.email_index.Meta.projection.projection_type, AllProjection.projection_type)
with patch(PATCH_METHOD) as req:
req.return_value = INDEX_TABLE_DATA
with self.assertRaises(ValueError):
IndexedModel('foo', 'bar')
IndexedModel._get_meta_data()
scope_args = {'count': 0}
def fake_dynamodb(*args, **kwargs):
if scope_args['count'] == 0:
scope_args['count'] += 1
raise ClientError({'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Not Found'}},
"DescribeTable")
else:
return {}
fake_db = MagicMock()
fake_db.side_effect = fake_dynamodb
with patch(PATCH_METHOD, new=fake_db) as req:
IndexedModel.create_table(read_capacity_units=2, write_capacity_units=2)
params = {
'AttributeDefinitions': [
{'attribute_name': 'email', 'attribute_type': 'S'},
{'attribute_name': 'numbers', 'attribute_type': 'NS'}
],
'KeySchema': [
{'AttributeName': 'numbers', 'KeyType': 'RANGE'},
{'AttributeName': 'email', 'KeyType': 'HASH'}
]
}
schema = IndexedModel.email_index._get_schema()
args = req.call_args[0][1]
self.assertEqual(
args['GlobalSecondaryIndexes'][0]['ProvisionedThroughput'],
{
'ReadCapacityUnits': 2,
'WriteCapacityUnits': 1
}
)
self.assert_dict_lists_equal(schema['key_schema'], params['KeySchema'])
self.assert_dict_lists_equal(schema['attribute_definitions'], params['AttributeDefinitions'])
def test_local_index(self):
"""
Models.LocalSecondaryIndex
"""
with self.assertRaises(ValueError):
with patch(PATCH_METHOD) as req:
req.return_value = LOCAL_INDEX_TABLE_DATA
# This table has no range key
LocalIndexedModel('foo', 'bar')
with patch(PATCH_METHOD) as req:
req.return_value = LOCAL_INDEX_TABLE_DATA
LocalIndexedModel('foo')
schema = IndexedModel._get_indexes()
expected = {
'local_secondary_indexes': [
{
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'email'},
{'KeyType': 'RANGE', 'AttributeName': 'numbers'}
],
'IndexName': 'include_index',
'projection': {
'ProjectionType': 'INCLUDE',
'NonKeyAttributes': ['numbers']
}
}
],
'global_secondary_indexes': [
{
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'email'},
{'KeyType': 'RANGE', 'AttributeName': 'numbers'}
],
'IndexName': 'email_index',
'projection': {'ProjectionType': 'ALL'},
'provisioned_throughput': {
'WriteCapacityUnits': 1,
'ReadCapacityUnits': 2
}
}
],
'attribute_definitions': [
{'attribute_type': 'S', 'attribute_name': 'email'},
{'attribute_type': 'NS', 'attribute_name': 'numbers'},
{'attribute_type': 'S', 'attribute_name': 'email'},
{'attribute_type': 'NS', 'attribute_name': 'numbers'}
]
}
self.assert_dict_lists_equal(
schema['attribute_definitions'],
expected['attribute_definitions']
)
self.assertEqual(schema['local_secondary_indexes'][0]['projection']['ProjectionType'], 'INCLUDE')
self.assertEqual(schema['local_secondary_indexes'][0]['projection']['NonKeyAttributes'], ['numbers'])
scope_args = {'count': 0}
def fake_dynamodb(*args, **kwargs):
if scope_args['count'] == 0:
scope_args['count'] += 1
raise ClientError({'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Not Found'}},
"DescribeTable")
else:
return {}
fake_db = MagicMock()
fake_db.side_effect = fake_dynamodb
with patch(PATCH_METHOD, new=fake_db) as req:
LocalIndexedModel.create_table(read_capacity_units=2, write_capacity_units=2)
params = OrderedDict({
'AttributeDefinitions': [
{
'attribute_name': 'email', 'attribute_type': 'S'
},
{
'attribute_name': 'numbers',
'attribute_type': 'NS'
}
],
'KeySchema': [
{
'AttributeName': 'email', 'KeyType': 'HASH'
},
{
'AttributeName': 'numbers', 'KeyType': 'RANGE'
}
]
})
schema = LocalIndexedModel.email_index._get_schema()
args = req.call_args[0][1]
self.assert_dict_lists_equal(schema['attribute_definitions'], params['AttributeDefinitions'])
self.assert_dict_lists_equal(schema['key_schema'], params['KeySchema'])
self.assertTrue('ProvisionedThroughput' not in args['LocalSecondaryIndexes'][0])
def test_projections(self):
"""
Models.Projection
"""
projection = AllProjection()
self.assertEqual(projection.projection_type, ALL)
projection = KeysOnlyProjection()
self.assertEqual(projection.projection_type, KEYS_ONLY)
projection = IncludeProjection(non_attr_keys=['foo', 'bar'])
self.assertEqual(projection.projection_type, INCLUDE)
self.assertEqual(projection.non_key_attributes, ['foo', 'bar'])
self.assertRaises(ValueError, IncludeProjection, None)
with self.assertRaises(ValueError):
class BadIndex(Index):
pass
BadIndex()
with self.assertRaises(ValueError):
class BadIndex(Index):
class Meta:
pass
pass
BadIndex()
def test_throttle(self):
"""
Throttle.add_record
"""
throt = Throttle(30)
throt.add_record(None)
for i in range(10):
throt.add_record(1)
throt.throttle()
for i in range(2):
throt.add_record(50)
throt.throttle()
def test_old_style_model_exception(self):
"""
Display warning for pre v1.0 Models
"""
with self.assertRaises(AttributeError):
OldStyleModel._get_meta_data()
with self.assertRaises(AttributeError):
OldStyleModel.exists()
def test_dumps(self):
"""
Model.dumps
"""
with patch(PATCH_METHOD) as req:
items = []
for idx in range(10):
item = copy.copy(GET_MODEL_ITEM_DATA.get(ITEM))
item['user_id'] = {STRING_SHORT: 'id-{0}'.format(idx)}
item['email'] = {STRING_SHORT: 'email-{0}'.format(random.randint(0, 65536))}
item['picture'] = {BINARY_SHORT: BINARY_ATTR_DATA}
items.append(item)
req.return_value = {'Items': items}
content = UserModel.dumps()
serialized_items = json.loads(content)
for original, new_item in zip(items, serialized_items):
self.assertEqual(new_item[0], original['user_name'][STRING_SHORT])
self.assertEqual(new_item[1][pythonic(ATTRIBUTES)]['zip_code']['N'], original['zip_code']['N'])
self.assertEqual(new_item[1][pythonic(ATTRIBUTES)]['email']['S'], original['email']['S'])
self.assertEqual(new_item[1][pythonic(ATTRIBUTES)]['picture']['B'], original['picture']['B'])
def test_loads(self):
"""
Model.loads
"""
with patch(PATCH_METHOD) as req:
req.return_value = {}
UserModel.loads(json.dumps(SERIALIZED_TABLE_DATA))
args = {
'UserModel': [
{
'PutRequest': {
'Item': {
'user_id': {'S': u'id-0'},
'callable_field': {'N': '42'},
'user_name': {'S': u'foo'},
'email': {'S': u'email-7980'},
'picture': {
"B": "aGVsbG8sIHdvcmxk"
},
'zip_code': {'N': '88030'}
}
}
},
{
'PutRequest': {
'Item': {
'user_id': {'S': u'id-1'},
'callable_field': {'N': '42'},
'user_name': {'S': u'foo'},
'email': {'S': u'email-19770'},
'picture': {
"B": "aGVsbG8sIHdvcmxk"
},
'zip_code': {'N': '88030'}
}
}
}
]
}
self.assert_dict_lists_equal(req.call_args[0][1]['RequestItems']['UserModel'], args['UserModel'])
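

# ---------------------------------------------------------------------------
# Illustrative sketch only (not used by the tests above). The tests exercise
# model classes such as UserModel, IndexedModel and CustomAttrNameModel that
# are defined elsewhere in this suite; the minimal model below merely shows
# roughly what a PynamoDB model with the user_name/user_id hash and range
# keys used here could look like. The class name, attribute set and table
# name are assumptions, not the actual test fixtures.
# ---------------------------------------------------------------------------
from pynamodb.models import Model as _SketchModel
from pynamodb.attributes import NumberAttribute as _SketchNumberAttribute
from pynamodb.attributes import UnicodeAttribute as _SketchUnicodeAttribute


class _UserModelSketch(_SketchModel):
    """Hypothetical stand-in mirroring the shape of the UserModel fixture."""

    class Meta:
        table_name = 'UserModelSketch'

    user_name = _SketchUnicodeAttribute(hash_key=True)
    user_id = _SketchUnicodeAttribute(range_key=True)
    email = _SketchUnicodeAttribute(null=True)
    zip_code = _SketchNumberAttribute(null=True)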
|
|
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
if sys.version_info[:2] >= (3, 4):
import pathlib
else:
import pathlib2 as pathlib
import py
import pytest
import tox
from tox.config import parseconfig
from tox.reporter import Verbosity
from tox.session import Session
pytest_plugins = "pytester"
class TestSession:
def test_log_pcall(self, mocksession):
mocksession.logging_levels(quiet=Verbosity.DEFAULT, verbose=Verbosity.INFO)
mocksession.config.logdir.ensure(dir=1)
assert not mocksession.config.logdir.listdir()
with mocksession.newaction("what", "something") as action:
action.popen(["echo"])
match = mocksession.report.getnext("logpopen")
log_name = py.path.local(match[1].split(">")[-1].strip()).relto(
mocksession.config.logdir,
)
assert log_name == "what-0.log"
def test_summary_status(self, initproj, capfd):
initproj(
"logexample123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv:hello]
[testenv:world]
""",
},
)
config = parseconfig([])
session = Session(config)
envs = list(session.venv_dict.values())
assert len(envs) == 2
env1, env2 = envs
env1.status = "FAIL XYZ"
assert env1.status
env2.status = 0
assert not env2.status
session._summary()
out, err = capfd.readouterr()
exp = "{}: FAIL XYZ".format(env1.envconfig.envname)
assert exp in out
exp = "{}: commands succeeded".format(env2.envconfig.envname)
assert exp in out
def test_getvenv(self, initproj):
initproj(
"logexample123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv:hello]
[testenv:world]
""",
},
)
config = parseconfig([])
session = Session(config)
venv1 = session.getvenv("hello")
venv2 = session.getvenv("hello")
assert venv1 is venv2
venv1 = session.getvenv("world")
venv2 = session.getvenv("world")
assert venv1 is venv2
with pytest.raises(LookupError):
session.getvenv("qwe")
def test_notoxini_help_still_works(initproj, cmd):
initproj("example123-0.5", filedefs={"tests": {"test_hello.py": "def test_hello(): pass"}})
result = cmd("-h")
assert result.out.startswith("usage: ")
assert any("--help" in line for line in result.outlines), result.outlines
result.assert_success(is_run_test_env=False)
def test_notoxini_noerror_in_help(initproj, cmd):
initproj("examplepro", filedefs={})
result = cmd("-h")
msg = "ERROR: tox config file (either pyproject.toml, tox.ini, setup.cfg) not found\n"
assert result.err != msg
def test_notoxini_help_ini_still_works(initproj, cmd):
initproj("example123-0.5", filedefs={"tests": {"test_hello.py": "def test_hello(): pass"}})
result = cmd("--help-ini")
assert any("setenv" in line for line in result.outlines), result.outlines
result.assert_success(is_run_test_env=False)
def test_notoxini_noerror_in_help_ini(initproj, cmd):
initproj("examplepro", filedefs={})
result = cmd("--help-ini")
msg = "ERROR: tox config file (either pyproject.toml, tox.ini, setup.cfg) not found\n"
assert result.err != msg
def test_unrecognized_arguments_error(initproj, cmd):
initproj(
"examplepro1",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv:hello]
[testenv:world]
""",
},
)
result1 = cmd("--invalid-argument")
withtoxini = result1.err
initproj("examplepro2", filedefs={})
result2 = cmd("--invalid-argument")
notoxini = result2.err
assert withtoxini == notoxini
def test_envdir_equals_toxini_errors_out(cmd, initproj):
initproj(
"interp123-0.7",
filedefs={
"tox.ini": """
[testenv]
envdir={toxinidir}
""",
},
)
result = cmd()
assert result.outlines[1] == "ERROR: ConfigError: envdir must not equal toxinidir"
assert re.match(
r"ERROR: venv \'python\' in .* would delete project",
result.outlines[0],
), result.outlines[0]
result.assert_fail()
def test_envdir_would_delete_some_directory(cmd, initproj):
projdir = initproj(
"example-123",
filedefs={
"tox.ini": """\
[tox]
[testenv:venv]
envdir=example
commands=
""",
},
)
result = cmd("-e", "venv")
assert projdir.join("example/__init__.py").exists()
result.assert_fail()
assert "cowardly refusing to delete `envdir`" in result.out
def test_recreate(cmd, initproj):
initproj("example-123", filedefs={"tox.ini": ""})
cmd("-e", "py", "--notest").assert_success()
cmd("-r", "-e", "py", "--notest").assert_success()
def test_run_custom_install_command_error(cmd, initproj):
initproj(
"interp123-0.5",
filedefs={
"tox.ini": """
[testenv]
install_command=./tox.ini {opts} {packages}
""",
},
)
result = cmd()
result.assert_fail()
    assert re.match(
r"ERROR: python: InvocationError for command .* \(exited with code \d+\)",
result.outlines[-1],
), result.out
def test_unknown_interpreter_and_env(cmd, initproj):
initproj(
"interp123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """\
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
skip_install = true
""",
},
)
result = cmd()
result.assert_fail()
assert "ERROR: InterpreterNotFound: xyz_unknown_interpreter" in result.outlines
result = cmd("-exyz")
result.assert_fail()
assert result.out == "ERROR: unknown environment 'xyz'\n"
def test_unknown_interpreter_factor(cmd, initproj):
initproj("py21", filedefs={"tox.ini": "[testenv]\nskip_install=true"})
result = cmd("-e", "py21")
result.assert_fail()
assert "ERROR: InterpreterNotFound: python2.1" in result.outlines
def test_unknown_interpreter(cmd, initproj):
initproj(
"interp123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
""",
},
)
result = cmd()
result.assert_fail()
assert any(
"ERROR: InterpreterNotFound: xyz_unknown_interpreter" == line for line in result.outlines
), result.outlines
def test_skip_platform_mismatch(cmd, initproj):
initproj(
"interp123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv]
changedir=tests
platform=x123
""",
},
)
result = cmd()
result.assert_success()
assert any(
"SKIPPED: python: platform mismatch ({!r} does not match 'x123')".format(sys.platform)
== line
for line in result.outlines
), result.outlines
def test_skip_unknown_interpreter(cmd, initproj):
initproj(
"interp123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
""",
},
)
result = cmd("--skip-missing-interpreters")
result.assert_success()
msg = "SKIPPED: python: InterpreterNotFound: xyz_unknown_interpreter"
assert any(msg == line for line in result.outlines), result.outlines
def test_skip_unknown_interpreter_result_json(cmd, initproj, tmpdir):
report_path = tmpdir.join("toxresult.json")
initproj(
"interp123-0.5",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv:python]
basepython=xyz_unknown_interpreter
[testenv]
changedir=tests
""",
},
)
result = cmd("--skip-missing-interpreters", "--result-json", report_path)
result.assert_success()
msg = "SKIPPED: python: InterpreterNotFound: xyz_unknown_interpreter"
assert any(msg == line for line in result.outlines), result.outlines
setup_result_from_json = json.load(report_path)["testenvs"]["python"]["setup"]
for setup_step in setup_result_from_json:
assert "InterpreterNotFound" in setup_step["output"]
assert setup_step["retcode"] == 0
def test_unknown_dep(cmd, initproj):
initproj(
"dep123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[testenv]
deps=qweqwe123
changedir=tests
""",
},
)
result = cmd()
result.assert_fail()
assert result.outlines[-1].startswith("ERROR: python: could not install deps [qweqwe123];")
def test_venv_special_chars_issue252(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"tox.ini": """
[tox]
envlist = special&&1
[testenv:special&&1]
changedir=tests
""",
},
)
result = cmd()
result.assert_success()
pattern = re.compile(r"special&&1 installed: .*pkg123( @ .*-|==)0\.7(\.zip)?.*")
assert any(pattern.match(line) for line in result.outlines), "\n".join(result.outlines)
def test_unknown_environment(cmd, initproj):
initproj("env123-0.7", filedefs={"tox.ini": ""})
result = cmd("-e", "qpwoei")
result.assert_fail()
assert result.out == "ERROR: unknown environment 'qpwoei'\n"
def test_unknown_environment_with_envlist(cmd, initproj):
initproj(
"pkg123",
filedefs={
"tox.ini": """
[tox]
envlist = py{36,37}-django{20,21}
""",
},
)
result = cmd("-e", "py36-djagno21")
result.assert_fail()
assert result.out == "ERROR: unknown environment 'py36-djagno21'\n"
def test_minimal_setup_py_empty(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"setup.py": """
""",
"tox.ini": "",
},
)
result = cmd()
result.assert_fail()
assert result.outlines[-1] == "ERROR: setup.py is empty"
def test_minimal_setup_py_comment_only(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"setup.py": """\n# some comment
""",
"tox.ini": "",
},
)
result = cmd()
result.assert_fail()
assert result.outlines[-1] == "ERROR: setup.py is empty"
def test_minimal_setup_py_non_functional(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"setup.py": """
import sys
""",
"tox.ini": "",
},
)
result = cmd()
result.assert_fail()
assert any(
re.match(r".*ERROR.*check setup.py.*", line) for line in result.outlines
), result.outlines
def test_sdist_fails(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"setup.py": """
syntax error
""",
"tox.ini": "",
},
)
result = cmd()
result.assert_fail()
assert any(
re.match(r".*FAIL.*could not package project.*", line) for line in result.outlines
), result.outlines
def test_no_setup_py_exits(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tox.ini": """
[testenv]
commands=python -c "2 + 2"
""",
},
)
os.remove("setup.py")
result = cmd()
result.assert_fail()
assert any(
re.match(r".*ERROR.*No pyproject.toml or setup.py file found.*", line)
for line in result.outlines
), result.outlines
def test_no_setup_py_exits_but_pyproject_toml_does(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tox.ini": """
[testenv]
commands=python -c "2 + 2"
""",
},
)
os.remove("setup.py")
pathlib.Path("pyproject.toml").touch()
result = cmd()
result.assert_fail()
assert any(
re.match(r".*ERROR.*pyproject.toml file found.*", line) for line in result.outlines
), result.outlines
assert any(
re.match(r".*To use a PEP 517 build-backend you are required to*", line)
for line in result.outlines
), result.outlines
def test_package_install_fails(cmd, initproj):
initproj(
"pkg123-0.7",
filedefs={
"tests": {"test_hello.py": "def test_hello(): pass"},
"setup.py": """
from setuptools import setup
setup(
name='pkg123',
description='pkg123 project',
version='0.7',
license='MIT',
platforms=['unix', 'win32'],
packages=['pkg123',],
install_requires=['qweqwe123'],
)
""",
"tox.ini": "",
},
)
result = cmd()
result.assert_fail()
assert result.outlines[-1].startswith("ERROR: python: InvocationError for command ")
@pytest.fixture
def example123(initproj):
yield initproj(
"example123-0.5",
filedefs={
"tests": {
"test_hello.py": """
def test_hello(pytestconfig):
pass
""",
},
"tox.ini": """
[testenv]
changedir=tests
commands= pytest --basetemp={envtmpdir} \
--junitxml=junit-{envname}.xml
deps=pytest
""",
},
)
def test_toxuone_env(cmd, example123):
result = cmd()
result.assert_success()
assert re.match(
r".*generated\W+xml\W+file.*junit-python\.xml" r".*\W+1\W+passed.*",
result.out,
re.DOTALL,
)
result = cmd("-epython")
result.assert_success()
assert re.match(
r".*\W+1\W+passed.*" r"summary.*" r"python:\W+commands\W+succeeded.*",
result.out,
re.DOTALL,
)
def test_different_config_cwd(cmd, example123):
# see that things work with a different CWD
with example123.dirpath().as_cwd():
result = cmd("-c", "example123/tox.ini")
result.assert_success()
assert re.match(
r".*\W+1\W+passed.*" r"summary.*" r"python:\W+commands\W+succeeded.*",
result.out,
re.DOTALL,
)
def test_result_json(cmd, initproj, example123):
cwd = initproj(
"example123",
filedefs={
"tox.ini": """
[testenv]
deps = setuptools
commands_pre = python -c 'print("START")'
commands = python -c 'print("OK")'
- python -c 'print("1"); raise SystemExit(1)'
python -c 'print("1"); raise SystemExit(2)'
python -c 'print("SHOULD NOT HAPPEN")'
commands_post = python -c 'print("END")'
""",
},
)
json_path = cwd / "res.json"
result = cmd("--result-json", json_path)
result.assert_fail()
data = json.loads(json_path.read_text(encoding="utf-8"))
assert data["reportversion"] == "1"
assert data["toxversion"] == tox.__version__
for env_data in data["testenvs"].values():
for command_type in ("setup", "test"):
if command_type not in env_data:
assert False, "missing {}".format(command_type)
for command in env_data[command_type]:
assert isinstance(command["command"], list)
assert command["output"]
assert "retcode" in command
assert isinstance(command["retcode"], int)
# virtualenv, deps install, package install, freeze
assert len(env_data["setup"]) == 4
# 1 pre + 3 command + 1 post
assert len(env_data["test"]) == 5
assert isinstance(env_data["installed_packages"], list)
pyinfo = env_data["python"]
assert isinstance(pyinfo["version_info"], list)
assert pyinfo["version"]
assert pyinfo["executable"]
assert "write json report at: {}".format(json_path) == result.outlines[-1]
def test_developz(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
""",
},
)
result = cmd("-vv", "--develop")
result.assert_success()
assert "sdist-make" not in result.out
def test_usedevelop(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
[testenv]
usedevelop=True
""",
},
)
result = cmd("-vv")
result.assert_success()
assert "sdist-make" not in result.out
def test_usedevelop_mixed(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
[testenv:dev]
usedevelop=True
[testenv:nondev]
usedevelop=False
""",
},
)
# running only 'dev' should not do sdist
result = cmd("-vv", "-e", "dev")
result.assert_success()
assert "sdist-make" not in result.out
# running all envs should do sdist
result = cmd("-vv")
result.assert_success()
assert "sdist-make" in result.out
@pytest.mark.parametrize("skipsdist", [False, True])
@pytest.mark.parametrize("src_root", [".", "src"])
def test_test_usedevelop(cmd, initproj, src_root, skipsdist):
name = "example123-spameggs"
base = initproj(
(name, "0.5"),
src_root=src_root,
filedefs={
"tests": {
"test_hello.py": """
def test_hello(pytestconfig):
pass
""",
},
"tox.ini": """
[testenv]
usedevelop=True
changedir=tests
commands=
pytest --basetemp={envtmpdir} --junitxml=junit-{envname}.xml []
deps=pytest"""
+ """
skipsdist={}
""".format(
skipsdist,
),
},
)
result = cmd("-v")
result.assert_success()
assert re.match(
r".*generated\W+xml\W+file.*junit-python\.xml" r".*\W+1\W+passed.*",
result.out,
re.DOTALL,
)
assert "sdist-make" not in result.out
result = cmd("-epython")
result.assert_success()
assert "develop-inst-noop" in result.out
assert re.match(
r".*\W+1\W+passed.*" r"summary.*" r"python:\W+commands\W+succeeded.*",
result.out,
re.DOTALL,
)
# see that things work with a different CWD
with base.dirpath().as_cwd():
result = cmd("-c", "{}/tox.ini".format(name))
result.assert_success()
assert "develop-inst-noop" in result.out
assert re.match(
r".*\W+1\W+passed.*" r"summary.*" r"python:\W+commands\W+succeeded.*",
result.out,
re.DOTALL,
)
# see that tests can also fail and retcode is correct
testfile = py.path.local("tests").join("test_hello.py")
assert testfile.check()
testfile.write("def test_fail(): assert 0")
result = cmd()
result.assert_fail()
assert "develop-inst-noop" in result.out
assert re.match(
r".*\W+1\W+failed.*" r"summary.*" r"python:\W+commands\W+failed.*",
result.out,
re.DOTALL,
)
# test develop is called if setup.py changes
setup_py = py.path.local("setup.py")
setup_py.write(setup_py.read() + " ")
result = cmd()
result.assert_fail()
assert "develop-inst-nodeps" in result.out
def test_warning_emitted(cmd, initproj):
initproj(
"spam-0.0.1",
filedefs={
"tox.ini": """
[testenv]
skipsdist=True
usedevelop=True
""",
"setup.py": """
from setuptools import setup
from warnings import warn
warn("I am a warning")
setup(name="spam", version="0.0.1")
""",
},
)
cmd()
result = cmd()
assert "develop-inst-noop" in result.out
assert "I am a warning" in result.err
def _alwayscopy_not_supported():
# This is due to virtualenv bugs with alwayscopy in some platforms
# see: https://github.com/pypa/virtualenv/issues/565
supported = True
tmpdir = tempfile.mkdtemp()
try:
with open(os.devnull) as fp:
subprocess.check_call(
[sys.executable, "-m", "virtualenv", "--always-copy", tmpdir],
stdout=fp,
stderr=fp,
)
except subprocess.CalledProcessError:
supported = False
finally:
shutil.rmtree(tmpdir)
return not supported
alwayscopy_not_supported = _alwayscopy_not_supported()
@pytest.mark.skipif(alwayscopy_not_supported, reason="Platform doesn't support alwayscopy")
def test_alwayscopy(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
[testenv]
commands={envpython} --version
alwayscopy=True
""",
},
)
result = cmd("-vv")
result.assert_success()
assert "virtualenv --always-copy" in result.out
def test_alwayscopy_default(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
[testenv]
commands={envpython} --version
""",
},
)
result = cmd("-vv")
result.assert_success()
assert "virtualenv --always-copy" not in result.out
@pytest.mark.skipif("sys.platform == 'win32'", reason="no echo on Windows")
def test_empty_activity_ignored(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
[testenv]
list_dependencies_command=echo
commands={envpython} --version
""",
},
)
result = cmd()
result.assert_success()
assert "installed:" not in result.out
@pytest.mark.skipif("sys.platform == 'win32'", reason="no echo on Windows")
def test_empty_activity_shown_verbose(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
[testenv]
list_dependencies_command=echo
commands={envpython} --version
allowlist_externals = echo
""",
},
)
result = cmd("-v")
result.assert_success()
assert "installed:" in result.out
def test_test_piphelp(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """
# content of: tox.ini
[testenv]
commands=pip -h
""",
},
)
result = cmd("-vv")
result.assert_success()
def test_notest(initproj, cmd):
initproj(
"example123",
filedefs={
"tox.ini": """\
# content of: tox.ini
[testenv:py26]
basepython={}
""".format(
sys.executable,
),
},
)
result = cmd("-v", "--notest")
result.assert_success()
assert re.match(r".*summary.*" r"py26\W+skipped\W+tests.*", result.out, re.DOTALL)
result = cmd("-v", "--notest", "-epy26")
result.assert_success()
assert re.match(r".*py26\W+reusing.*", result.out, re.DOTALL)
def test_notest_setup_py_error(initproj, cmd):
initproj(
"example123",
filedefs={
"setup.py": """\
from setuptools import setup
setup(name='x', install_requires=['fakefakefakefakefakefake']),
""",
"tox.ini": "",
},
)
result = cmd("--notest")
result.assert_fail()
assert re.search("ERROR:.*InvocationError", result.out)
@pytest.mark.parametrize("has_config", [True, False])
def test_devenv(initproj, cmd, has_config):
filedefs = {
"setup.py": """\
from setuptools import setup
setup(name='x')
""",
}
if has_config:
filedefs[
"tox.ini"
] = """\
[tox]
# envlist is ignored for --devenv
envlist = foo,bar,baz
[testenv]
# --devenv implies --notest
commands = python -c "exit(1)"
"""
initproj(
"example123",
filedefs=filedefs,
)
result = cmd("--devenv", "venv")
result.assert_success()
# `--devenv` defaults to the `py` environment and a develop install
assert "py develop-inst:" in result.out
assert re.search("py create:.*venv", result.out)
def test_devenv_does_not_allow_multiple_environments(initproj, cmd):
initproj(
"example123",
filedefs={
"setup.py": """\
from setuptools import setup
setup(name='x')
""",
"tox.ini": """\
[tox]
envlist=foo,bar,baz
""",
},
)
result = cmd("--devenv", "venv", "-e", "foo,bar")
result.assert_fail()
assert result.err == "ERROR: --devenv requires only a single -e\n"
def test_devenv_does_not_delete_project(initproj, cmd):
initproj(
"example123",
filedefs={
"setup.py": """\
from setuptools import setup
setup(name='x')
""",
"tox.ini": """\
[tox]
envlist=foo,bar,baz
""",
},
)
result = cmd("--devenv", "")
result.assert_fail()
assert "would delete project" in result.out
assert "ERROR: ConfigError: envdir must not equal toxinidir" in result.out
def test_PYC(initproj, cmd, monkeypatch):
initproj("example123", filedefs={"tox.ini": ""})
monkeypatch.setenv("PYTHONDOWNWRITEBYTECODE", "1")
result = cmd("-v", "--notest")
result.assert_success()
assert "create" in result.out
def test_env_VIRTUALENV_PYTHON(initproj, cmd, monkeypatch):
initproj("example123", filedefs={"tox.ini": ""})
monkeypatch.setenv("VIRTUALENV_PYTHON", "/FOO")
result = cmd("-v", "--notest")
result.assert_success()
assert "create" in result.out
def test_setup_prints_non_ascii(initproj, cmd):
initproj(
"example123",
filedefs={
"setup.py": """\
import sys
getattr(sys.stdout, 'buffer', sys.stdout).write(b'\\xe2\\x98\\x83\\n')
import setuptools
setuptools.setup(name='example123')
""",
"tox.ini": "",
},
)
result = cmd("--notest")
result.assert_success()
assert "create" in result.out
def test_envsitepackagesdir(cmd, initproj):
initproj(
"pkg512-0.0.5",
filedefs={
"tox.ini": """
[testenv]
commands=
python -c "print(r'X:{envsitepackagesdir}')"
""",
},
)
result = cmd()
result.assert_success()
assert re.match(r".*\nX:.*tox.*site-packages.*", result.out, re.DOTALL)
def test_envsitepackagesdir_skip_missing_issue280(cmd, initproj):
initproj(
"pkg513-0.0.5",
filedefs={
"tox.ini": """
[testenv]
basepython=/usr/bin/qwelkjqwle
commands=
{envsitepackagesdir}
""",
},
)
result = cmd("--skip-missing-interpreters")
result.assert_success()
assert re.match(r".*SKIPPED:.*qwelkj.*", result.out, re.DOTALL)
@pytest.mark.parametrize("verbosity", ["", "-v", "-vv"])
def test_verbosity(cmd, initproj, verbosity):
initproj(
"pkgX-0.0.5",
filedefs={
"tox.ini": """
[testenv]
""",
},
)
result = cmd(verbosity)
result.assert_success()
needle = "Successfully installed pkgX-0.0.5"
if verbosity == "-vv":
assert any(needle in line for line in result.outlines), result.outlines
else:
assert all(needle not in line for line in result.outlines), result.outlines
def test_envtmpdir(initproj, cmd):
initproj(
"foo",
filedefs={
# This file first checks that envtmpdir is existent and empty. Then it
# creates an empty file in that directory. The tox command is run
# twice below, so this is to test whether the directory is cleared
# before the second run.
"check_empty_envtmpdir.py": """if True:
import os
from sys import argv
envtmpdir = argv[1]
assert os.path.exists(envtmpdir)
assert os.listdir(envtmpdir) == []
open(os.path.join(envtmpdir, 'test'), 'w').close()
""",
"tox.ini": """
[testenv]
commands=python check_empty_envtmpdir.py {envtmpdir}
""",
},
)
result = cmd()
result.assert_success()
result = cmd()
result.assert_success()
def test_missing_env_fails(initproj, cmd):
ini = """
[testenv:foo]
install_command={env:FOO}
commands={env:VAR}
"""
initproj("foo", filedefs={"tox.ini": ini})
result = cmd()
result.assert_fail()
assert result.out.endswith(
"foo: unresolvable substitution(s):\n"
" commands: 'VAR'\n"
" install_command: 'FOO'\n"
"Environment variables are missing or defined recursively.\n",
)
def test_tox_console_script(initproj):
initproj("help", filedefs={"tox.ini": ""})
result = subprocess.check_call(["tox", "--help"])
assert result == 0
def test_tox_quickstart_script(initproj):
initproj("help", filedefs={"tox.ini": ""})
result = subprocess.check_call(["tox-quickstart", "--help"])
assert result == 0
def test_tox_cmdline_no_args(monkeypatch, initproj):
initproj("help", filedefs={"tox.ini": ""})
monkeypatch.setattr(sys, "argv", ["caller_script", "--help"])
with pytest.raises(SystemExit):
tox.cmdline()
def test_tox_cmdline_args(initproj):
initproj("help", filedefs={"tox.ini": ""})
with pytest.raises(SystemExit):
tox.cmdline(["caller_script", "--help"])
@pytest.mark.parametrize("exit_code", [0, 6])
def test_exit_code(initproj, cmd, exit_code, mocker):
"""Check for correct InvocationError, with exit code,
except for zero exit code"""
import tox.exception
mocker.spy(tox.exception, "exit_code_str")
tox_ini_content = "[testenv:foo]\ncommands=python -c 'import sys; sys.exit({:d})'".format(
exit_code,
)
initproj("foo", filedefs={"tox.ini": tox_ini_content})
cmd()
if exit_code:
# need mocker.spy above
assert tox.exception.exit_code_str.call_count == 1
(args, kwargs) = tox.exception.exit_code_str.call_args
assert kwargs == {}
(call_error_name, call_command, call_exit_code) = args
assert call_error_name == "InvocationError"
# quotes are removed in result.out
# do not include "python" as it is changed to python.EXE by appveyor
expected_command_arg = " -c 'import sys; sys.exit({:d})'".format(exit_code)
assert expected_command_arg in call_command
assert call_exit_code == exit_code
else:
# need mocker.spy above
assert tox.exception.exit_code_str.call_count == 0
|
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains integration test for TPUStrategy in regards to memory."""
import gc
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
NUM_CLASS = 10
def get_dataset():
def generate_data(_):
image = tf.ones([500, 500, 3], dtype=tf.float32)
label = tf.zeros([1], dtype=tf.int32)
return image, label
def preprocess(image, label):
label = tf.cast(label, tf.int32)
label = tf.one_hot(label, NUM_CLASS)
label = tf.reshape(label, [NUM_CLASS])
return image, label
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
generate_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.repeat()
dataset = dataset.batch(128, drop_remainder=True)
return dataset
class TpuMemoryTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# Clear all cached tensors
context._reset_context()
# Run garbage collection to free any tensors from previous
# runs.
gc.collect()
# Run a small program and copy the result to CPU.
# This causes deferred deallocations to be flushed and new memory to be
# allocated in a less fragmented way.
# Turning deferred deallocations off no longer seems to work.
assert tf.reduce_sum(tf.random.uniform(
(1024, 128), dtype=tf.float32)).numpy() > 1.0
self.resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu="", project=None, zone=None)
tf.config.experimental_connect_to_cluster(self.resolver)
tf.tpu.experimental.initialize_tpu_system(self.resolver)
def testAutoDefragInProgramLoading(self):
    # This test covers the case of training a large model on TPU. TPU HBM is
    # not big enough to hold all TPU buffers and also preserve stack space for
    # the TPU program, so the runtime automatically unloads unused TPU
    # programs to free up space for TPU buffers. Having lots of TPU buffers
    # may also fragment HBM and prevent a TPU program from being loaded
    # properly; in that case the runtime automatically defrags in order to
    # load a large TPU program.
strategy = tf.distribute.TPUStrategy(self.resolver)
dataset = get_dataset()
iterator = iter(
strategy.experimental_distribute_dataset(dataset,
tf.distribute.InputOptions()))
# Create a dummy big model that is close to HBM limit (15G):
# Temp HBM: 11G
# Sharded variable size: 2G
# Unsharded variables size: 4G
with strategy.scope():
x = tf.keras.layers.Input(shape=(500, 500, 3), name="input")
y = tf.keras.layers.Conv2D(
384, (15, 15),
strides=(2, 2),
padding="valid",
use_bias=False,
kernel_initializer="he_normal",
name="conv1")(
x)
y = tf.keras.layers.BatchNormalization(
momentum=0.997, center=True, scale=True)(
y)
y = tf.keras.layers.Dense(
10,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
y = tf.keras.layers.Conv2D(
64, (9, 9),
strides=(2, 2),
padding="valid",
use_bias=False,
kernel_initializer="he_normal",
name="conv2")(
y)
y = tf.keras.layers.Flatten()(y)
y = tf.keras.layers.Dense(
1024,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
y = tf.keras.layers.Dense(
1024,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
y = tf.keras.layers.Dense(
NUM_CLASS,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
model = tf.keras.Model(x, y)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
loss_obj = tf.keras.losses.CategoricalCrossentropy(
label_smoothing=0.0, reduction=tf.keras.losses.Reduction.NONE)
model.compile(optimizer=optimizer, loss=loss_obj)
@tf.function
def train_step(iterator):
def step_fn(inputs):
images, targets = inputs
with tf.GradientTape() as tape:
outputs = model(images, training=True)
loss = model.loss(targets, outputs)
grads = tape.gradient(loss, model.trainable_variables)
model.optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
      # Using a host training loop here to trigger weight-update sharding. It
      # will introduce shard-variable and unshard-variable ops into the graph.
      # When running the unshard-variable op, HBM won't have enough space for
      # the unsharded variables: 11G + 2G + 4G > 15G. So the runtime has to
      # automatically unload the step function to free up space for the
      # unshard-variable op.
for _ in tf.range(tf.constant(20)):
strategy.run(step_fn, args=(next(iterator),))
      # We want to load the step function again after the unshard-variable op.
      # However, we won't have enough space due to fragmentation:
      # 15G - 2G - 4G < 11G. So the runtime has to automatically defrag in
      # order to load the program successfully.
strategy.run(step_fn, args=(next(iterator),))
# A dummy result to indicate this @tf.function has finished.
return 1.0
if FLAGS.tpu_use_tfrt:
result = train_step(iterator)
self.assertAllClose(1.0, result, atol=1e-07)
else:
# TPU StreamExecutor does not support auto-defrag in program loading. So
# it will return a ResourceExhaustedError.
with self.assertRaises(tf.errors.ResourceExhaustedError):
_ = train_step(iterator)
def testAutoDefragInBufferAllocation(self):
if not FLAGS.tpu_use_tfrt:
self.skipTest(
"TPU StreamExecutor does not support auto-defrag in allocation.")
with tf.device("TPU:0"):
      # DF has ~15G of HBM. The following 7 buffers will consume most of it.
# pylint: disable=unused-variable
buffer_2g_1 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_2 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_3 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_4 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_5 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_6 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_7 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
# pylint: enable=unused-variable
# Deallocate two buffers.
del buffer_2g_1, buffer_2g_3
gc.collect()
      # The buffers we just deallocated don't provide a contiguous region large
      # enough for a 4G allocation, so this allocation will trigger auto-defrag.
buffer_4g = tf.random.uniform((4, 256, 1024, 1024), dtype=tf.float32)
self.assertEndsWith(buffer_4g.device, "device:TPU:0")
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/env python2.7
#Carnac Gui using OOP
'''
Intent of this program:
Pull in a CSV
Process the title column
Run rules to modify the column contents based on "roles"
Let the user change the rules in-app
Run the rules multiple times
Export to a file
'''
#----------
#Import your shit
#----------
from Tkinter import *
from ttk import * #upgrades to ttk to allow dynamic modification of clm_select, some syntax changes in "borderwidth" and "padding" from the old tk
from tkMessageBox import*
import tkFileDialog
import csv
#----------
#call Make the Gui and buttons
#----------
class Carnac(Frame): #calls the main window
def __init__(self, parent = None):
Frame.__init__(self,parent) #makes main menu top level <--- this is a lie? see @learnwhat
self.imported_csv = []
self.working_file = []
self.Clm_select_variable = []
self.Clm_menu_values = []
self.ColumnSelectDropdown = []
self.pack(expand=YES, fill=BOTH)
self.createWidgets()
self.master.title("Carnac Role Guessing Tool")
self.master.iconname("Carnac")
def createWidgets(self): #loads widgets into Carnac
self.makeMenuBar()
self.makeColumnSelectBar()
self.makeButtonBar()
Side_scroll = Scrollbar(self)
Bot_scroll = Scrollbar(self)
Print_box = Text(self)
Side_scroll.config(command=Print_box.yview)
Side_scroll.pack(side=RIGHT, fill=Y)
Bot_scroll.config(command=Print_box.xview, orient=HORIZONTAL)
Bot_scroll.pack(side=BOTTOM, fill=X)
Print_box.config(state=DISABLED, relief=SUNKEN, width=80, height=20, bg='white',yscrollcommand=Side_scroll.set, xscrollcommand=Bot_scroll.set)
Print_box.config(wrap=NONE)
Print_box.pack(side=LEFT, expand=YES, fill=BOTH)
self.Print_box = Print_box
#-------
#Report function
    #This function saves time and code by wrapping writes to the text widget in a single print-like call
    #'where' is a Text index (not a string, e.g. END); 'what' is the string to display
def report(self,where,what): #to be used to print to the user and the shell
self.Print_box.configure(state=NORMAL) #allows additions to text widget
self.Print_box.insert(where,what) #try printing to text widget
self.Print_box.insert(END,"\n")
print what #for debugging
self.Print_box.configure(state=DISABLED) #stops additions to text widget
#------------
def makeMenuBar(self):
self.menubar = Menu(self.master)
self.master.config(menu=self.menubar) #master top level window @leanwhat top level means specifically #THIS IS THE TOPLEVEL WINDOW THAT GETS CLOSED
self.fileMenu()
def fileMenu(self):
pulldown = Menu(self.menubar) # The (self.menubar) sets it in the menubar
pulldown.add_command(label="Import File", command=self.import_csv)
pulldown.add_command(label="Reset", command=self.RESET)
pulldown.add_command(label="Edit Rules", command=self.modify_rules)
pulldown.add_command(label="Save As", command=self.save_csv)
pulldown.add_separator()
pulldown.add_command(label="Close", command=self.program_quit)
self.menubar.add_cascade(label='File', underline=0, menu=pulldown)
    def makeButtonBar(self): #aka button bar in the pseudo
ButtonBar = Frame(self, cursor='hand2', padding="3 0 0 0", relief=SUNKEN, borderwidth=2)
ButtonBar.pack(side=BOTTOM, fill=X)
import_button = Button(ButtonBar, text = "Import", command=self.import_csv)
import_button.pack(side = "left")
# column_select_button = Button(ButtonBar, text = "Select Column", command=self.column_pop)
#column_select_button.pack(side = "left")
output_button = Button(ButtonBar, text = "List Contents", command=self.output)
output_button.pack(side = "left")
rules_button = Button(ButtonBar, text = "Run Rules", command=self.run_rules)
rules_button.pack(side = "left")
import_button = Button(ButtonBar, text = "Save", command=self.save_csv)
import_button.pack(side = "left")
def makeColumnSelectBar(self):#runs only if there isn't one already
ColumnSelectBar = Frame(self, cursor='hand2', relief=SUNKEN, borderwidth=2)
ColumnSelectBar.pack(side=BOTTOM, fill=X)
#Clm_select_instruction = Label(ColumnSelectBar, text="Please select a column to run rules on:") #not needed now that selectdropdown is better filled
#Clm_select_instruction.pack(side=LEFT)
self.Clm_select_variable = StringVar(self)
self.Clm_menu_values = ['File not loaded, please "Import"']
self.Clm_select_variable.set(self.Clm_menu_values[0]) #default value, needed for the dropdown to work
self.ColumnSelectDropdown = OptionMenu(ColumnSelectBar, self.Clm_select_variable, *self.Clm_menu_values)
self.ColumnSelectDropdown.pack(side=LEFT)
#print ColumnSelectDropdown.get()
close_button = Button(ColumnSelectBar, text = "Close", command=self.program_quit) #moved to from button bar for better looks
close_button.pack(side = "right")
#-------
#define button actions
#-------
def program_quit(self):
self.report(END, "Shutting Down")
if askyesno('Verify Quit', 'Are you sure you want to quit?'):
self.master.destroy()
def import_csv(self):
self.report(END, "Importing...")
self.working_file = [] #resets the working file on import to allow for re-do in program
        filename = tkFileDialog.askopenfilename(filetypes=[('CSV (Comma Delimited)', '.csv')], defaultextension=".csv", title="Import CSV")
self.report(END,filename)
with open(filename, 'rb') as csvfile: #write in the CSV
#global imported_csv #modifies global
imported_file = csv.reader(csvfile, delimiter=',', quotechar='"')
self.imported_csv = []
row_count = -1 #"Take back one kadam to honor the Hebrew God, whose Ark this is"(remove one for the header)
for row in imported_file:
print ', '.join(row)
self.imported_csv.append(row)
row_count = row_count + 1
#print row_count #for debugging
print self.imported_csv
self.report(END,"Successfully imported %s records" % row_count)
self.Clm_select_variable.set('FILE IMPORTED') #default value, needed for the dropdown to work
self.ColumnSelectDropdown.set_menu('Please select which column the rules will be applied to', *self.imported_csv[0]) #gives it information to populate the optionmenu with
#copy over the list, so the original is preserved
def output(self):
self.report(END,"You asked for it...")
#print "Test that string!" #for debugging
if self.working_file == []: #Show the working file if it has something, otherwise show what was imported
for row in self.imported_csv: #dumps contents in rows
#print row #for debug, called in report function
self.report(END,row)
else:
for row in self.working_file:
self.report(END,row)
#select = self.Clm_select_variable.get() #must use .get() on variable not menu
#self.report(END,"The selected Column is: %s" % select)
def run_rules(self):
if self.Clm_select_variable.get() == "Please select which column the rules will be applied to": #if they didnt select a column, throw an error
showerror("Selection Not Made", "Please select a column to run rules on")#error popup using messagebox
else:
self.report(END,"Running the rules!")
if self.working_file == []: #if transition empty, then fill with selected column
active_col = 0 #sets variable
self.report(END,self.imported_csv[0]) #prints first row for debug
active_col = self.imported_csv[0].index(self.Clm_select_variable.get())#gets the column number of the selection
self.report(END,active_col) #prints for debugging
self.report(END,self.Clm_select_variable.get())#reports the name which will be run
for row in self.imported_csv:
self.working_file.append(row[active_col])#takes the entry from each row in main list and makes sub list @todo currently broken
#read that column into a transition file
#clean transition file
#run rules that are rule run yes on transition file
#return box name and amount if amount greater than 0
def modify_rules(self):
self.report(END,"This will pop up a new window")
#pop up new gui to have user add mod and del rules
#has checkbox for rule run yes/no
def RESET(self):
#global working_csv
if askyesno('Verify Reset', 'Are you sure you want to undo all rule work?'):
self.working_file = []
#save button: takes the working file and turns it into the output file
def save_csv(self):
#add contents of working_file into imported_csv (probably a function)
self.report(END, "Saving...")
print self.imported_csv
exportname = tkFileDialog.asksaveasfilename(filetypes=[('CSV (Comma Delimited)', '.csv')], defaultextension=".csv", title="Save As")
print exportname
# write contents of list into .csv
with open(exportname, 'wb') as csvfile:
output_file = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
#output_file.writerow(['please', 'work', 'damnit'])
output_file.writerows(self.imported_csv)
if __name__ == '__main__': Carnac().mainloop() #if I'm run as a script
|
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from backend.firebase_db_util import post_feed, get_last_feeds, get_reddit_url, update_desc, is_enabled, delete_feed
from backend.fcm_util import send_fcm
from backend.reddit_util import submit_to_reddit, edit_submission, delete_submission
from backend.scrape_util import format_text, scrape_site, smart_truncate
from backend.rss_util import parse_feed, find_feed_in_array
from backend.scopes import SCOPE_NEWS, SCOPE_UPLOADPLAN, SCOPE_PIETCAST, SCOPE_VIDEO
from backend.cloud_storage import store_image_in_gcloud, remove_image_from_gcloud
from backend.log_util import log
force = False
debug = False
nofcm = False
limit = -1
default_post_limit = 6
default_video_limit = 10
def check_for_update(scope):
global limit
# Check if master switch in db is off and abort if true
if not is_enabled():
if debug:
log("Info", "Master switch is off, ignoring (debug)")
else:
log("Warning", "Master switch is off, aborting")
return
# Set limit to default if not set manually
if limit == -1:
if scope == "video":
limit = default_video_limit
else:
limit = default_post_limit
log("Checking for " + scope)
website_feeds = parse_feed(scope, limit)
if len(website_feeds) == 0:
log("Error", "Pietsmiet.de feeds are empty, bad network or site down? Aborting")
return
# Load more db items than new ones to compare better (e.g. if there are deleted items in the db)
db_feed_limit = limit + 5
db_feeds = get_last_feeds(scope, db_feed_limit)
# Check that loading of db posts was successful
if db_feeds is None:
log("Error", "Cannot retrieve old feeds! Aborting")
return
# Check that there are posts in db, otherwise reload posts
if db_feeds is False:
log("Warning", "No feeds in db, loading all posts in db")
fetch_and_store(scope, 25)
return
# Check that all posts were loaded, otherwise reload posts
if len(db_feeds) != db_feed_limit:
log("Error", "Loaded " + str(len(db_feeds)) + " feeds from db, should be " + str(db_feed_limit))
fetch_and_store(scope, 25)
return
# Iterate through every website feed and check if it is new (its title or link does _not_ match
# one of the old feeds)
new_feeds = {}
i = 0
for website_feed in website_feeds:
# Compare pietsmiet.de feed against all feeds from db
if (find_feed_in_array(website_feed, db_feeds) is False) or force:
new_feeds[i] = website_feed
i += 1
if (len(new_feeds) >= limit) and not force:
# All feeds changed, which means there was probably a gap in between => Reload all posts into db
# This should only happen if the script wasn't running for a few days
log("Posts in db too old, loading all posts in db")
fetch_and_store(scope, 25)
elif len(new_feeds) == 0:
# No new posts found => Database should be the same as pietsmiet.de now,
# so we can check if there are invalid posts in db
log("Info", "No new posts found for scope " + scope)
check_deleted_posts(db_feeds, website_feeds)
if scope == SCOPE_UPLOADPLAN:
# Also check if the uploadplan was edited
website_feed = find_feed_in_array(db_feeds[0], website_feeds)
if website_feed is not False:
check_uploadplan_edited(db_feeds[0], website_feed)
else:
# Iterate through all new feeds and process them
for i, new_feed in new_feeds.items():
# New item found
process_new_item(new_feed, scope, i)
time.sleep(1)
def check_uploadplan_edited(old_feed, new_feed):
# Check if uploadplan desc changed
new_feed.desc = scrape_site(new_feed.link)
if new_feed.desc != old_feed.desc:
if debug:
log("Desc has changed, not putting into db because of debug")
return
if old_feed.reddit_url is not None:
edit_submission(format_text(new_feed), old_feed.reddit_url)
else:
# Inform about missing reddit url and still store the new desc to avoid spam of this
log("Warning", "No reddit url provided")
# Put the updated desc back into db
update_desc(new_feed)
def check_deleted_posts(db_feeds, website_feeds):
# Compare posts from db against the rss posts to make sure there are no deleted posts in the db
i = 0
for db_feed in db_feeds:
i += 1
is_deleted = True
for website_feed in website_feeds:
if (db_feed.title == website_feed.title) and (db_feed.date == website_feed.date) and (
db_feed.link == website_feed.link):
is_deleted = False
# We found the equivalent, break the loop
break
if is_deleted:
# There was no equivalent on pietsmiet.de, means it was probably deleted
# => Remove it from the database
log("Feed with title '" + db_feed.title + "' was in db but not on pietsmiet.de. Deleting from database!")
if not debug:
delete_feed(db_feed)
remove_image_from_gcloud(db_feed)
# Only compare db posts against the same size of pietsmiet.de posts
# because there are more db posts loaded than pietsmiet.de posts
if i >= len(website_feeds):
break
def process_new_item(feed, scope, i):
# Submit to firebase FCM & DB and if uploadplan to reddit
log("Debug", "New item in " + feed.scope + " with title: " + feed.title)
if (scope == SCOPE_UPLOADPLAN) or (scope == SCOPE_NEWS):
# Scrape site for the feed description
feed.desc = scrape_site(feed.link)
if scope == SCOPE_NEWS:
# Truncate the news description
feed.desc = smart_truncate(feed)
if scope == SCOPE_VIDEO:
if feed.image_url is not None:
# Store thumb in gcloud and send fcm
feed.image_url = store_image_in_gcloud(feed.image_url, feed)
if nofcm is True:
fcm_success = True
else:
fcm_success = send_fcm(feed, debug)
else:
# Don't send FCM as videos without thumbs are usually bad uploads and will be reuploaded
# Still store it in the DB if it just doesn't have a thumb for another reason
log("Warning", "No thumbnail found, means it's probably a bad upload. Not sending FCM!" +
"Title is \"" + feed.title + "\"")
fcm_success = True
else:
if nofcm is True:
fcm_success = True
else:
fcm_success = send_fcm(feed, debug)
if not fcm_success:
log("Error", "Could not send FCM, aborting!")
return
if (scope == SCOPE_UPLOADPLAN) and (i == 0):
# Don't submit an old uploadplan: only submit it if it's the first (newest) new feed
log("Submitting uploadplan to reddit")
time.sleep(1)
r_url = submit_to_reddit(feed.title, format_text(feed), debug=debug)
feed.reddit_url = r_url
if not debug:
post_feed(feed)
def fetch_and_store(scope, count):
website_feeds = parse_feed(scope, count)
log("Loading " + str(len(website_feeds)) + " items in " + scope)
for feed in website_feeds:
if (scope == SCOPE_UPLOADPLAN) or (scope == SCOPE_NEWS):
feed.desc = scrape_site(feed.link)
time.sleep(1)
if scope == SCOPE_NEWS:
feed.desc = smart_truncate(feed)
if (scope == SCOPE_VIDEO) and (feed.image_url is not None):
feed.image_url = store_image_in_gcloud(feed.image_url, feed)
if debug:
log("Not posting to firebase because of debug")
else:
post_feed(feed)
time.sleep(1)
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--scope", required=False, choices=['uploadplan', 'news', 'video', 'pietcast', 'delete'],
help="The scope to load")
parser.add_argument("-d", "--debug", required=False, default=False, action='store_true',
help="This enables debug mode, which is basically a dry run. It'll not update the firebase db" +
"and only submit FCMs to the debug channel and reddit posts to r/l3d00m")
parser.add_argument("-n", "--nofcm", required=False, default=False, action='store_true',
help="This enables no notification mode, which is basically a dry run. It'll not send "
"notifications.")
parser.add_argument("-f", "--force", required=False, default=False, action='store_true',
help="This enables the dry run debug mode and simulates new posts even if there are no new posts.")
parser.add_argument("-a", "--loadall", required=False, type=int,
help="(Re)loads the specified amount of posts in all scopes into the database. " +
"Note: Limit for uploadplan, pietcast and news is always 8")
parser.add_argument("-l", "--limit", required=False, type=int, choices=range(2, 20),
help="Set a custom limit how many posts should be compared.")
args = parser.parse_args()
if args.debug:
log("Debug enabled.")
debug = True
if args.force:
log("Debug and force enabled.")
force = True
debug = True
if args.nofcm:
log("No FCM mode active.")
nofcm = True
if args.limit:
if args.loadall:
log("Limit ignored because it's specified in the --loadall parameter")
limit = int(args.limit)
log("Info", "Limit set to " + str(limit))
if args.loadall:
log("Loading all items to db. This will take a few minutes")
limit = int(args.loadall)
fetch_and_store(SCOPE_UPLOADPLAN, limit)
fetch_and_store(SCOPE_NEWS, limit)
fetch_and_store(SCOPE_VIDEO, limit)
fetch_and_store(SCOPE_PIETCAST, limit)
sys.exit()
if args.scope == 'uploadplan':
check_for_update(SCOPE_UPLOADPLAN)
elif args.scope == 'video':
check_for_update(SCOPE_VIDEO)
elif args.scope == 'pietcast':
check_for_update(SCOPE_PIETCAST)
elif args.scope == 'news':
check_for_update(SCOPE_NEWS)
elif args.scope == 'delete':
url = get_reddit_url()
if url is not None:
log("Deleting submission...")
delete_submission(url)
else:
log("Warning", "Couldn't delete submission, no URL in db")
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
st = stochastic_tensor
sge = stochastic_gradient_estimators
dists = distributions
def _vimco(loss):
"""Python implementation of VIMCO."""
n = loss.shape[0]
log_loss = np.log(loss)
geometric_mean = []
for j in range(n):
geometric_mean.append(
np.exp(np.mean([log_loss[i, :] for i in range(n) if i != j], 0)))
geometric_mean = np.array(geometric_mean)
learning_signal = []
for j in range(n):
learning_signal.append(np.sum([loss[i, :] for i in range(n) if i != j], 0))
learning_signal = np.array(learning_signal)
local_learning_signal = np.log(1 / n * (learning_signal + geometric_mean))
# log_mean - local_learning_signal
log_mean = np.log(np.mean(loss, 0))
advantage = log_mean - local_learning_signal
return advantage
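# In formula form (a restatement of the loops above, applied per batch element),
# for sample j of n:
#   advantage_j = log( (1/n) * sum_i loss_i )
#                 - log( (1/n) * ( sum_{i != j} loss_i + geomean_{i != j} loss_i ) )
# i.e. the log of the full Monte Carlo mean minus the log of the mean with loss_j
# replaced by the geometric mean of the other samples.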
class StochasticGradientEstimatorsTest(test.TestCase):
def setUp(self):
self._p = constant_op.constant(0.999999)
self._final_loss = constant_op.constant(3.2)
def _testScoreFunction(self, loss_fn, expected):
x = st.StochasticTensor(dists.Bernoulli(probs=self._p), loss_fn=loss_fn)
sf = x.loss(self._final_loss)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(*sess.run([expected, sf]))
def testScoreFunction(self):
expected = math_ops.log(self._p) * self._final_loss
self._testScoreFunction(sge.score_function, expected)
def testScoreFunctionWithConstantBaseline(self):
b = constant_op.constant(9.8)
expected = math_ops.log(self._p) * (self._final_loss - b)
self._testScoreFunction(
sge.get_score_function_with_constant_baseline(b), expected)
def testScoreFunctionWithBaselineFn(self):
b = constant_op.constant(9.8)
def baseline_fn(stoch_tensor, loss):
self.assertTrue(isinstance(stoch_tensor, st.StochasticTensor))
self.assertTrue(isinstance(loss, ops.Tensor))
return b
expected = math_ops.log(self._p) * (self._final_loss - b)
self._testScoreFunction(
sge.get_score_function_with_baseline(baseline_fn), expected)
def testScoreFunctionWithMeanBaseline(self):
ema_decay = 0.8
num_steps = 6
x = st.StochasticTensor(
dists.Bernoulli(probs=self._p),
loss_fn=sge.get_score_function_with_baseline(
sge.get_mean_baseline(ema_decay)))
sf = x.loss(self._final_loss)
# Expected EMA value
ema = 0.
for _ in range(num_steps):
ema -= (1. - ema_decay) * (ema - self._final_loss)
# Baseline is EMA with bias correction
bias_correction = 1. - ema_decay**num_steps
baseline = ema / bias_correction
expected = math_ops.log(self._p) * (self._final_loss - baseline)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(num_steps - 1):
sess.run(sf) # run to update EMA
self.assertAllClose(*sess.run([expected, sf]))
def testScoreFunctionWithAdvantageFn(self):
b = constant_op.constant(9.8)
def advantage_fn(stoch_tensor, loss):
self.assertTrue(isinstance(stoch_tensor, st.StochasticTensor))
self.assertTrue(isinstance(loss, ops.Tensor))
return loss - b
expected = math_ops.log(self._p) * (self._final_loss - b)
self._testScoreFunction(
sge.get_score_function_with_advantage(advantage_fn), expected)
def testVIMCOAdvantageFn(self):
# simple_loss: (3, 2) with 3 samples, batch size 2
simple_loss = np.array(
[[1.0, 1.5],
[1e-6, 1e4],
[2.0, 3.0]])
# random_loss: (100, 50, 64) with 100 samples, batch shape (50, 64)
random_loss = 100 * np.random.rand(100, 50, 64)
advantage_fn = sge.get_vimco_advantage_fn(have_log_loss=False)
with self.test_session() as sess:
for loss in [simple_loss, random_loss]:
expected = _vimco(loss)
loss_t = constant_op.constant(loss, dtype=dtypes.float32)
advantage_t = advantage_fn(None, loss_t) # ST is not used
advantage = sess.run(advantage_t)
self.assertEqual(expected.shape, advantage_t.get_shape())
self.assertAllClose(expected, advantage, atol=5e-5)
def testVIMCOAdvantageGradients(self):
loss = np.log(
[[1.0, 1.5],
[1e-6, 1e4],
[2.0, 3.0]])
advantage_fn = sge.get_vimco_advantage_fn(have_log_loss=True)
with self.test_session():
loss_t = constant_op.constant(loss, dtype=dtypes.float64)
advantage_t = advantage_fn(None, loss_t) # ST is not used
gradient_error = gradient_checker.compute_gradient_error(
loss_t,
loss_t.get_shape().as_list(),
advantage_t,
advantage_t.get_shape().as_list(),
x_init_value=loss)
self.assertLess(gradient_error, 1e-3)
def testVIMCOAdvantageWithSmallProbabilities(self):
theta_value = np.random.rand(10, 100000)
# Test with float16 dtype to ensure stability even in this extreme case.
theta = constant_op.constant(theta_value, dtype=dtypes.float16)
advantage_fn = sge.get_vimco_advantage_fn(have_log_loss=True)
with self.test_session() as sess:
log_loss = -math_ops.reduce_sum(theta, [1])
advantage_t = advantage_fn(None, log_loss)
grad_t = gradients_impl.gradients(advantage_t, theta)[0]
advantage, grad = sess.run((advantage_t, grad_t))
self.assertTrue(np.all(np.isfinite(advantage)))
self.assertTrue(np.all(np.isfinite(grad)))
def testScoreFunctionWithMeanBaselineHasUniqueVarScope(self):
ema_decay = 0.8
x = st.StochasticTensor(
dists.Bernoulli(probs=self._p),
loss_fn=sge.get_score_function_with_baseline(
sge.get_mean_baseline(ema_decay)))
y = st.StochasticTensor(
dists.Bernoulli(probs=self._p),
loss_fn=sge.get_score_function_with_baseline(
sge.get_mean_baseline(ema_decay)))
sf_x = x.loss(self._final_loss)
sf_y = y.loss(self._final_loss)
with self.test_session() as sess:
# Smoke test
sess.run(variables.global_variables_initializer())
sess.run([sf_x, sf_y])
if __name__ == "__main__":
test.main()
|
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def inception_resnet_v2_base(inputs,
final_endpoint='Conv2d_7b_1x1',
output_stride=16,
align_feature_maps=False,
scope=None):
"""Inception model from http://arxiv.org/abs/1602.07261.
Constructs an Inception Resnet v2 network from inputs to the given final
endpoint. This method can construct the network up to the final inception
block Conv2d_7b_1x1.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
output_stride: A scalar that specifies the requested ratio of input to
output spatial resolution. Only supports 8 and 16.
align_feature_maps: When true, changes all the VALID paddings in the network
to SAME padding so that the feature maps are aligned.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or if the output_stride is not 8 or 16, or if the output_stride is 8 and
we request an end point after 'PreAuxLogits'.
"""
if output_stride != 8 and output_stride != 16:
raise ValueError('output_stride must be 8 or 16.')
padding = 'SAME' if align_feature_maps else 'VALID'
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding=padding,
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_3a_3x3')
if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding=padding,
scope='Conv2d_3b_1x1')
if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding=padding,
scope='Conv2d_4a_3x3')
if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding=padding,
scope='MaxPool_5a_3x3')
if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
scope='Conv2d_0b_1x1')
net = tf.concat(
[tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
if add_and_check_final('Mixed_5b', net): return net, end_points
# TODO(alemi): Register intermediate endpoints
net = slim.repeat(net, 10, block35, scale=0.17)
# 33 x 33 x 1088 if output_stride == 8,
# 17 x 17 x 1088 if output_stride == 16
use_atrous = output_stride == 8
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
stride=1 if use_atrous else 2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
padding=padding,
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
if add_and_check_final('Mixed_6a', net): return net, end_points
# TODO(alemi): register intermediate endpoints
with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1):
net = slim.repeat(net, 20, block17, scale=0.10)
if add_and_check_final('PreAuxLogits', net): return net, end_points
if output_stride == 8:
# TODO(gpapan): Properly support output_stride for the rest of the net.
raise ValueError('output_stride==8 is only supported up to the '
'PreAuxLogits end_point for now.')
# 8 x 8 x 2080
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding=padding,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2,
padding=padding,
scope='MaxPool_1a_3x3')
net = tf.concat(
[tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
if add_and_check_final('Mixed_7a', net): return net, end_points
# TODO(alemi): register intermediate endpoints
net = slim.repeat(net, 9, block8, scale=0.20)
net = block8(net, activation_fn=None)
# 8 x 8 x 1536
net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points
raise ValueError('final_endpoint (%s) not recognized' % final_endpoint)
def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionResnetV2',
create_aux_logits=True):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxiliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs, num_classes],
reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_resnet_v2_base(inputs, scope=scope)
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
aux = end_points['PreAuxLogits']
aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, num_classes, activation_fn=None,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_resnet_v2.default_image_size = 299
def inception_resnet_v2_arg_scope(weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Returns the scope with the default parameters for inception_resnet_v2.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Returns:
an arg_scope with the parameters needed for inception_resnet_v2.
"""
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
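# Minimal usage sketch (illustrative only, not part of the original file). `images` is
# an assumed float32 tensor of shape [batch_size, 299, 299, 3]:
#
#   with slim.arg_scope(inception_resnet_v2_arg_scope()):
#       logits, end_points = inception_resnet_v2(images, num_classes=1001,
#                                                 is_training=False)
#   predictions = end_points['Predictions']  # softmax over the 1001 classes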
|
|
__file__ = 'IRI_v7'
__date__ = '1/28/2016'
__author__ = 'ABREZNIC'
"""
The MIT License (MIT)
Copyright (c) 2016 Texas Department of Transportation
Author: Adam Breznicky
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import arcpy, os, datetime, csv
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
input = arcpy.GetParameterAsText(0)
calRhino = arcpy.GetParameterAsText(1)
output = arcpy.GetParameterAsText(2)
# theMXD = "C:\\TxDOT\\Projects\\IRI_dan\\working\\Untitled.mxd"
inputlist = input.split(";")
inputcntr = 1
lengthinput = len(inputlist)
issuesReport = [["DISTRICT_FILE", "ROUTE_ID", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH", "IRI", "RUTTING", "DATE", "ERROR_DESCRIPTION"]]
statsReport = [["DISTRICT_FILE", "LG Record Count", "KG Record Count", "Total Records Count", "Input Record Count", "Lost Records Count", "LG Records Length", "KG Records Length", "Total Routed Length"]]
arcpy.CreateFileGDB_management(output, "RhinoLines.gdb")
rhinospace = output + os.sep + "RhinoLines.gdb"
rhino_lines = rhinospace + os.sep + "rhinolines"
# arcpy.Copy_management(calRhino, rhino_lines)
arcpy.FeatureClassToFeatureClass_conversion(calRhino, rhinospace, "rhinolines")
# arcpy.AddField_management(rhino_lines, "FRM_DFO", "DOUBLE")
# arcpy.AddField_management(rhino_lines, "TO_DFO", "DOUBLE")
cursor = arcpy.da.UpdateCursor(rhino_lines, ["FRM_DFO", "TO_DFO", 'SHAPE@', "OBJECTID"])
for row in cursor:
# arcpy.AddMessage(row[3])
bp = row[2].firstPoint.M
ep = row[2].lastPoint.M
bpNew = float(format(float(bp), '.3f'))
epNew = float(format(float(ep), '.3f'))
row[0] = bpNew
row[1] = epNew
cursor.updateRow(row)
del cursor
del row
arcpy.AddMessage("Calibrated RHINO copied local.")
arcpy.AddField_management(rhino_lines, "RTE_ORDER", "SHORT")
arcpy.AddField_management(rhino_lines, "FLAG", "TEXT", "", "", 30)
arcpy.AddMessage("Applying RTE_ORDER.")
cursor = arcpy.da.UpdateCursor(rhino_lines, ["RIA_RTE_ID", "FRM_DFO", "RTE_ORDER", "FLAG"], "", "", "", (None, "ORDER BY RIA_RTE_ID ASC, FRM_DFO ASC"))
# cursor = arcpy.da.UpdateCursor(rhino_lines, ["RTE_ID", "FRM_DFO", "RTE_ORDER", "FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"], "", "", "", (None, "ORDER BY RTE_ID ASC, FRM_DFO ASC"))
counter = 0
order = 1
previous = ""
for row in cursor:
current = row[0]
if counter == 0:
row[2] = order
elif counter != 0 and previous == current:
order += 1
row[2] = order
else:
order = 1
row[2] = order
previous = current
counter += 1
# ru = int(row[4])
# fs = int(row[5])
# nhs = int(row[6])
# row[3] = current + "-" + str(order) + "-" + str(ru) + "-" + str(fs) + "-" + str(nhs) + "-" + str(row[7])
row[3] = current + "-" + str(order)
cursor.updateRow(row)
del cursor
arcpy.AddMessage("RTE_ORDER applied.")
dictionary = {}
cursor = arcpy.da.SearchCursor(rhino_lines, ["FLAG", "FRM_DFO", "TO_DFO"])
for row in cursor:
flag = row[0]
odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
fDFO = row[1]
tDFO = row[2]
dictionary[odr] = [fDFO, tDFO]
del cursor
for excel in inputlist:
distName = str(excel).split("\\")[-1]
if distName[-1] == "$":
distName = distName[:-1]
if distName[-4:] == ".dbf":
distName = distName[:-4]
arcpy.AddMessage("Beginning " + str(inputcntr) + " of " + str(lengthinput) + ": " + distName)
arcpy.CreateFileGDB_management(output, "Wrkg" + str(inputcntr) + ".gdb")
workspace = output + os.sep + "Wrkg" + str(inputcntr) + ".gdb"
arcpy.AddMessage("Working database created.")
data = []
lg = []
fields = ["ROUTE_ID", "BEGIN_POIN", "END_POINT", "SECTION_LE", "IRI", "RUTTING", "DATE", "LANE"]
# fields = ["ROUTE_ID", "BEGIN_POIN", "END_POINT", "SECTION_LE", "IRI", "RUTTING", "DATE"]
data.append(fields)
lg.append(fields)
# spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\GCS_WGS_1984.prj"
spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\WGS 1984.prj"
arcpy.MakeXYEventLayer_management(excel, "Long", "Lat", "pointEvents" + str(inputcntr), spref)
arcpy.AddMessage("Event Layer created.")
pntfeature = workspace + os.sep + "allPoints"
arcpy.CopyFeatures_management("pointEvents" + str(inputcntr), pntfeature)
arcpy.AddMessage("Point feature class created.")
initial = 0
ids = []
cursor = arcpy.da.SearchCursor(pntfeature, ["ROUTE_ID", "LANE"])
for row in cursor:
id = row[0]
lane = row[1]
combo = id + "-" + lane
initial += 1
if combo not in ids:
ids.append(combo)
del cursor
del row
arcpy.AddMessage("RTE_IDs compiled.")
roadslayer = ""
pointslayer = ""
# mxd = arcpy.mapping.MapDocument(theMXD)
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.name == "rhinolines":
arcpy.mapping.RemoveLayer(df, lyr)
if lyr.name == "allPoints":
arcpy.mapping.RemoveLayer(df, lyr)
newlayerpnt = arcpy.mapping.Layer(pntfeature)
arcpy.mapping.AddLayer(df, newlayerpnt)
newlayerline = arcpy.mapping.Layer(rhino_lines)
arcpy.mapping.AddLayer(df, newlayerline)
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.name == "rhinolines":
roadslayer = lyr
if lyr.name == "allPoints":
pointslayer = lyr
arcpy.AddMessage("Layers acquired.")
counter = 1
total = len(ids)
arcpy.AddMessage("Finding measures for: ")
for combo in ids:
id = combo.split("-")[0] + "-" + combo.split("-")[1]
lane = combo.split("-")[2]
roadslayer.definitionQuery = " RIA_RTE_ID = '" + id + "' "
pointslayer.definitionQuery = " ROUTE_ID = '" + id + "' AND LANE = '" + lane + "'"
arcpy.RefreshActiveView()
arcpy.AddMessage(str(counter) + "/" + str(total) + " " + combo)
label = combo.replace("-", "")
arcpy.LocateFeaturesAlongRoutes_lr(pointslayer, roadslayer, "FLAG", "230 Feet", workspace + os.sep + label, "FLAG POINT END_POINT")
counter += 1
arcpy.AddMessage("Tables created.")
# alltables = []
arcpy.env.workspace = workspace
tables = arcpy.ListTables()
for table in tables:
arcpy.AddMessage(table)
arcpy.AddField_management(table, "ODR_FLAG", "TEXT", "", "", 20)
arcpy.AddMessage("Order Flag field created.")
numbDict = {}
cursor = arcpy.da.UpdateCursor(table, ["FLAG", "ODR_FLAG"])
for row in cursor:
flag = row[0]
odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
if odr not in numbDict.keys():
numbDict[odr] = 1
else:
curNumb = numbDict[odr]
curNumb += 1
numbDict[odr] = curNumb
row[1] = odr
cursor.updateRow(row)
del cursor
counter = 1
previous = ""
last = ""
# cursor = arcpy.da.UpdateCursor(table, ["ODR_FLAG", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH"], None, None, False, (None, "ORDER BY ODR_FLAG ASC, END_POINT ASC"))
cursor = arcpy.da.UpdateCursor(table, ["ODR_FLAG", "BEGIN_POIN", "END_POINT", "SECTION_LE"], None, None, False, (None, "ORDER BY ODR_FLAG ASC, END_POINT ASC"))
for row in cursor:
current = row[0]
total = numbDict[current]
if counter == 1 and counter != total:
values = dictionary[current]
beginner = float(format(float(values[0]), '.3f'))
segEnd = float(format(float(row[2]), '.3f'))
if abs(segEnd - beginner) > 1:
segSrt = segEnd - .1
row[1] = float(format(float(segSrt), '.3f'))
row[2] = segEnd
row[3] = round(row[2] - row[1], 3)
else:
row[1] = beginner
row[2] = segEnd
row[3] = round(row[2] - row[1], 3)
elif counter == 1 and counter == total:
values = dictionary[current]
row[1] = float(format(float(values[0]), '.3f'))
row[2] = float(format(float(values[1]), '.3f'))
row[3] = round(row[2] - row[1], 3)
counter = 0
elif previous == current and counter != total:
row[1] = last
row[2] = float(format(float(row[2]), '.3f'))
row[3] = round(row[2] - last, 3)
elif previous == current and counter == total:
values = dictionary[current]
ender = float(format(float(values[1]), '.3f'))
if abs(ender - last) > 1:
row[1] = last
row[2] = float(format(float(row[2]), '.3f'))
row[3] = round(row[2] - last, 3)
else:
row[1] = last
row[2] = float(format(float(values[1]), '.3f'))
row[3] = round(row[2] - last, 3)
counter = 0
else:
arcpy.AddMessage("problem with " + current)
last = row[2]
cursor.updateRow(row)
previous = current
counter += 1
del cursor
arcpy.AddMessage("Measure difference fields populated.")
arcpy.Merge_management(tables, workspace + os.sep + "merged")
arcpy.AddMessage("All tables merged successfully.")
# arcpy.AddField_management(workspace + os.sep + "merged", "RU", "TEXT", "", "", 5)
# arcpy.AddMessage("RU field created.")
# arcpy.AddField_management(workspace + os.sep + "merged", "F_SYSTEM", "TEXT", "", "", 5)
# arcpy.AddMessage("Functional System field created.")
# arcpy.AddField_management(workspace + os.sep + "merged", "SEC_NHS", "TEXT", "", "", 5)
# arcpy.AddMessage("NHS field created.")
# arcpy.AddField_management(workspace + os.sep + "merged", "HPMS", "TEXT", "", "", 5)
# arcpy.AddMessage("HPMS Keeper field created.")
# arcpy.AddMessage("Fields created.")
# cursor = arcpy.da.UpdateCursor(workspace + os.sep + "merged", ["FLAG", "RU", "F_SYSTEM", "SEC_NHS"])
## cursor = arcpy.da.UpdateCursor(workspace + os.sep + "merged", ["FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"])
# for row in cursor:
# flag = row[0]
# row[1] = flag.split("-")[3]
# row[2] = flag.split("-")[4]
# row[3] = flag.split("-")[5]
# # row[4] = flag.split("-")[6]
# cursor.updateRow(row)
# del cursor
LGcounter = 0
KGcounter = 0
LGlength = 0
KGlength = 0
cursor = arcpy.da.SearchCursor(workspace + os.sep + "merged", fields, None, None, False, (None, "ORDER BY ROUTE_ID ASC, LANE ASC, BEGIN_POIN ASC"))
for row in cursor:
id = row[0]
if id[-2:] == "LG":
lg.append(row)
LGcounter += 1
LGlength += float(row[3])
elif id[-2:] == "RG":
THEid = id[:-2]
newid = THEid + "KG"
fixed = [newid, row[1], row[2], row[3], row[4], row[5], row[6], row[7]]
# fixed = [newid, row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10]]
data.append(fixed)
KGcounter += 1
KGlength += float(row[3])
if float(row[3]) > 1:
problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Abnormally large SECTION_LENGTH"]
issuesReport.append(problem)
if float(row[3]) == 0:
problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Zero length SECTION_LENGTH"]
issuesReport.append(problem)
else:
data.append(row)
KGcounter += 1
KGlength += float(row[3])
if float(row[3]) > 1:
problem = [distName, id, row[1], row[2], row[3], row[4], row[5], row[6], "Abnormally large SECTION_LENGTH"]
issuesReport.append(problem)
if float(row[3]) == 0:
problem = [distName, id, row[1], row[2], row[3], row[4], row[5], row[6], "Zero length SECTION_LENGTH"]
issuesReport.append(problem)
del cursor
arcpy.AddMessage("Data compiled.")
arcpy.AddMessage("Creating CSV reports.")
leftover = open(output + os.sep + distName + "_LG.csv", 'wb')
writer = csv.writer(leftover)
writer.writerows(lg)
leftover.close()
final = open(output + os.sep + distName + "_Plotted.csv", 'wb')
writer = csv.writer(final)
writer.writerows(data)
final.close()
arcpy.AddMessage("CSV written locally.")
arcpy.AddMessage("T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script" + os.sep + distName + "_LG.csv")
leftover = open("T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script" + os.sep + distName + "_LG.csv", 'wb')
writer = csv.writer(leftover)
writer.writerows(lg)
leftover.close()
final = open("T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script" + os.sep + distName + "_Plotted.csv", 'wb')
writer = csv.writer(final)
writer.writerows(data)
final.close()
arcpy.AddMessage("CSV written to T drive.")
pointsName = distName.split("_")[-1]
arcpy.FeatureClassToFeatureClass_conversion(pntfeature, "T:\\DATAMGT\\HPMS-DATA\\2015Data\\Pavement\\IRI\\IRIData\\Output_From_Script\\All_Points.gdb", pointsName)
arcpy.AddMessage("allpoints feature class transferred to T drive.")
TOTALcounter = LGcounter + KGcounter
TOTALlength = LGlength + KGlength
DIFFcounter = initial - TOTALcounter
statsReport.append([distName, LGcounter, KGcounter, TOTALcounter, initial, DIFFcounter, LGlength, KGlength, TOTALlength])
inputcntr += 1
if len(issuesReport) > 1:
arcpy.AddMessage("Creating errors report...")
errors = open(output + os.sep + "00ISSUES_Investigate.csv", 'wb')
writer = csv.writer(errors)
writer.writerows(issuesReport)
errors.close()
arcpy.AddMessage("Creating stats report...")
stats = open(output + os.sep + "00Statistics.csv", 'wb')
writer = csv.writer(stats)
writer.writerows(statsReport)
stats.close()
arcpy.AddMessage("that's all folks!")
arcpy.AddMessage("started: " + str(now))
now2 = datetime.datetime.now()
arcpy.AddMessage("ended: " + str(now2))
print "that's all folks!"
|
|
"""
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
from pip._vendor import six
import sys
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI always works.
HAS_SNI = True
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
_openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
_openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED:
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict(
(v, k) for k, v in _stdlib_to_openssl_verify.items()
)
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
log = logging.getLogger(__name__)
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
_validate_dependencies_met()
util.ssl_.SSLContext = PyOpenSSLContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_PYOPENSSL = True
util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_PYOPENSSL = False
util.ssl_.IS_PYOPENSSL = False
def _validate_dependencies_met():
"""
Verifies that PyOpenSSL's package-level dependencies have been met.
Throws `ImportError` if they are not met.
"""
# Method added in `cryptography==1.1`; not available in older versions
from cryptography.x509.extensions import Extensions
if getattr(Extensions, "get_extension_for_class", None) is None:
raise ImportError("'cryptography' module missing required functionality. "
"Try upgrading to v1.3.4 or newer.")
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
# attribute is only present on those versions.
from OpenSSL.crypto import X509
x509 = X509()
if getattr(x509, "_x509", None) is None:
raise ImportError("'pyOpenSSL' module missing required functionality. "
"Try upgrading to v0.14 or newer.")
def _dnsname_to_stdlib(name):
"""
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
"""
def idna_encode(name):
"""
Borrowed wholesale from the Python Cryptography Project. It turns out
that we can't just safely call `idna.encode`: it can explode for
wildcard names. This avoids that problem.
"""
import idna
for prefix in [u'*.', u'.']:
if name.startswith(prefix):
name = name[len(prefix):]
return prefix.encode('ascii') + idna.encode(name)
return idna.encode(name)
name = idna_encode(name)
if sys.version_info >= (3, 0):
name = name.decode('utf-8')
return name
def get_subj_alt_name(peer_cert):
"""
Given a PyOpenSSL certificate, provides all the subject alternative names.
"""
# Pass the cert to cryptography, which has much better APIs for this.
# This is technically using private APIs, but should work across all
# relevant versions until PyOpenSSL gets something proper for this.
cert = _Certificate(openssl_backend, peer_cert._x509)
# We want to find the SAN extension. Ask Cryptography to locate it (it's
# faster than looping in Python)
try:
ext = cert.extensions.get_extension_for_class(
x509.SubjectAlternativeName
).value
except x509.ExtensionNotFound:
# No such extension, return the empty list.
return []
except (x509.DuplicateExtension, x509.UnsupportedExtension,
x509.UnsupportedGeneralNameType, UnicodeError) as e:
# A problem has been found with the quality of the certificate. Assume
# no SAN field is present.
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
# We want to return dNSName and iPAddress fields. We need to cast the IPs
# back to strings because the match_hostname function wants them as
# strings.
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
# decoded. This is pretty frustrating, but that's what the standard library
# does with certificates, and so we need to attempt to do the same.
names = [
('DNS', _dnsname_to_stdlib(name))
for name in ext.get_values_for_type(x509.DNSName)
]
names.extend(
('IP Address', str(name))
for name in ext.get_values_for_type(x509.IPAddress)
)
return names
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
self._closed = False
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd = util.wait_for_read(self.socket, self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def recv_into(self, *args, **kwargs):
try:
return self.connection.recv_into(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return 0
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return 0
else:
raise
except OpenSSL.SSL.WantReadError:
rd = util.wait_for_read(self.socket, self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv_into(*args, **kwargs)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
wr = util.wait_for_write(self.socket, self.socket.gettimeout())
if not wr:
raise timeout()
continue
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
# FIXME rethrow compatible exceptions should we ever use this
self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
try:
self._closed = True
return self.connection.close()
except OpenSSL.SSL.Error:
return
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': get_subj_alt_name(x509)
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
makefile = backport_makefile
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
"""
I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
for translating the interface of the standard library ``SSLContext`` object
to calls into PyOpenSSL.
"""
def __init__(self, protocol):
self.protocol = _openssl_versions[protocol]
self._ctx = OpenSSL.SSL.Context(self.protocol)
self._options = 0
self.check_hostname = False
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._ctx.set_options(value)
@property
def verify_mode(self):
return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
@verify_mode.setter
def verify_mode(self, value):
self._ctx.set_verify(
_stdlib_to_openssl_verify[value],
_verify_callback
)
def set_default_verify_paths(self):
self._ctx.set_default_verify_paths()
def set_ciphers(self, ciphers):
if isinstance(ciphers, six.text_type):
ciphers = ciphers.encode('utf-8')
self._ctx.set_cipher_list(ciphers)
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
if cafile is not None:
cafile = cafile.encode('utf-8')
if capath is not None:
capath = capath.encode('utf-8')
self._ctx.load_verify_locations(cafile, capath)
if cadata is not None:
self._ctx.load_verify_locations(BytesIO(cadata))
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._ctx.use_certificate_file(certfile)
if password is not None:
self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
self._ctx.use_privatekey_file(keyfile or certfile)
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True, suppress_ragged_eofs=True,
server_hostname=None):
cnx = OpenSSL.SSL.Connection(self._ctx, sock)
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
server_hostname = server_hostname.encode('utf-8')
if server_hostname is not None:
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
rd = util.wait_for_read(sock, sock.gettimeout())
if not rd:
raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake: %r' % e)
break
return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
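# Usage sketch (illustrative only): after inject_into_urllib3(), urllib3 builds its TLS
# contexts through PyOpenSSLContext, which mirrors the ssl.SSLContext interface, e.g.:
#
#   ctx = PyOpenSSLContext(ssl.PROTOCOL_SSLv23)
#   ctx.verify_mode = ssl.CERT_REQUIRED
#   ctx.set_default_verify_paths()
#   wrapped = ctx.wrap_socket(plain_socket, server_hostname='example.com')
#
# `plain_socket` is an assumed already-connected TCP socket; wrap_socket() performs the
# handshake and returns a WrappedSocket exposing recv/sendall/getpeercert.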
|
|
# -*- coding: utf-8 -*-
"""
pysteps.io.interface
====================
Interface for the io module.
.. currentmodule:: pysteps.io.interface
.. autosummary::
:toctree: ../generated/
get_method
"""
import importlib
import warnings
from pkg_resources import iter_entry_points
from pysteps import io
from pysteps.decorators import postprocess_import
from pysteps.io import importers, exporters
from pprint import pprint
_importer_methods = dict(
bom_rf3=importers.import_bom_rf3,
fmi_geotiff=importers.import_fmi_geotiff,
fmi_pgm=importers.import_fmi_pgm,
mch_gif=importers.import_mch_gif,
mch_hdf5=importers.import_mch_hdf5,
mch_metranet=importers.import_mch_metranet,
mrms_grib=importers.import_mrms_grib,
odim_hdf5=importers.import_odim_hdf5,
opera_hdf5=importers.import_opera_hdf5,
knmi_hdf5=importers.import_knmi_hdf5,
saf_crri=importers.import_saf_crri,
)
_exporter_methods = dict(
geotiff=exporters.initialize_forecast_exporter_geotiff,
kineros=exporters.initialize_forecast_exporter_kineros,
netcdf=exporters.initialize_forecast_exporter_netcdf,
)
def discover_importers():
"""
    Search for installed importer plugins in the 'pysteps.plugins.importers'
    entry point. The importers found are added to the
    `pysteps.io.interface._importer_methods` dictionary of available importers.
"""
    # pkg_resources needs to be reloaded to detect packages installed during
    # the execution of the Python application, e.g. when plugins are installed
    # while the tests run.
import pkg_resources
importlib.reload(pkg_resources)
for entry_point in pkg_resources.iter_entry_points(
group="pysteps.plugins.importers", name=None
):
_importer = entry_point.load()
importer_function_name = _importer.__name__
importer_short_name = importer_function_name.replace("import_", "")
_postprocess_kws = getattr(_importer, "postprocess_kws", dict())
_importer = postprocess_import(**_postprocess_kws)(_importer)
if importer_short_name not in _importer_methods:
_importer_methods[importer_short_name] = _importer
else:
            warnings.warn(
                f"The importer identifier '{importer_short_name}' is already available in "
                "'pysteps.io.interface._importer_methods'.\n"
                f"Skipping {entry_point.module_name}:{'.'.join(entry_point.attrs)}",
                RuntimeWarning,
            )
if hasattr(importers, importer_function_name):
            warnings.warn(
                f"The importer function '{importer_function_name}' is already an attribute "
                "of 'pysteps.io.importers'.\n"
                f"Skipping {entry_point.module_name}:{'.'.join(entry_point.attrs)}",
                RuntimeWarning,
            )
else:
setattr(importers, importer_function_name, _importer)
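# A hedged illustration (not part of the original module) of how a third-party
# package might register an importer for discover_importers() to pick up via the
# 'pysteps.plugins.importers' entry-point group used above. The package, module
# and function names below are hypothetical.
#
#     # in the plugin's setup.py
#     from setuptools import setup
#     setup(
#         name="pysteps-importer-acme",
#         packages=["pysteps_importer_acme"],
#         entry_points={
#             "pysteps.plugins.importers": [
#                 "import_acme_radar = pysteps_importer_acme.importer:import_acme_radar"
#             ]
#         },
#     )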
def importers_info():
"""Print all the available importers."""
# Importers available in the `io.importers` module
available_importers = [
attr for attr in dir(io.importers) if attr.startswith("import_")
]
print("\nImporters available in the pysteps.io.importers module")
pprint(available_importers)
# Importers declared in the pysteps.io.get_method interface
importers_in_the_interface = [
f.__name__ for f in io.interface._importer_methods.values()
]
print("\nImporters available in the pysteps.io.get_method interface")
pprint(
[
(short_name, f.__name__)
for short_name, f in io.interface._importer_methods.items()
]
)
    # Let's use sets to find out if there are importers present in the importer
    # module but not declared in the interface, and vice versa.
available_importers = set(available_importers)
importers_in_the_interface = set(importers_in_the_interface)
difference = available_importers ^ importers_in_the_interface
if len(difference) > 0:
print("\nIMPORTANT:")
_diff = available_importers - importers_in_the_interface
if len(_diff) > 0:
print(
"\nIMPORTANT:\nThe following importers are available in pysteps.io.importers module "
"but not in the pysteps.io.get_method interface"
)
pprint(_diff)
_diff = importers_in_the_interface - available_importers
if len(_diff) > 0:
print(
"\nWARNING:\n"
"The following importers are available in the pysteps.io.get_method "
"interface but not in the pysteps.io.importers module"
)
pprint(_diff)
return available_importers, importers_in_the_interface
def get_method(name, method_type):
"""
Return a callable function for the method corresponding to the given
name.
Parameters
----------
name: str
Name of the method. The available options are:\n
Importers:
.. tabularcolumns:: |p{2cm}|L|
+--------------+------------------------------------------------------+
| Name | Description |
+==============+======================================================+
        | bom_rf3      | NetCDF files used in the Bureau of Meteorology       |
| | archive containing precipitation intensity |
| | composites. |
+--------------+------------------------------------------------------+
| fmi_geotiff | GeoTIFF files used in the Finnish Meteorological |
| | Institute (FMI) archive, containing reflectivity |
| | composites (dBZ). |
+--------------+------------------------------------------------------+
| fmi_pgm | PGM files used in the Finnish Meteorological |
| | Institute (FMI) archive, containing reflectivity |
| | composites (dBZ). |
+--------------+------------------------------------------------------+
| knmi_hdf5 | HDF5 file format used by KNMI. |
+--------------+------------------------------------------------------+
| mch_gif | GIF files in the MeteoSwiss (MCH) archive containing |
| | precipitation composites. |
+--------------+------------------------------------------------------+
        | mch_hdf5     | HDF5 file format used by MeteoSwiss (MCH).           |
+--------------+------------------------------------------------------+
| mch_metranet | metranet files in the MeteoSwiss (MCH) archive |
| | containing precipitation composites. |
+--------------+------------------------------------------------------+
| mrms_grib | Grib2 files used by the NSSL's MRMS product |
+--------------+------------------------------------------------------+
| odim_hdf5 | HDF5 file conforming to the ODIM specification. |
+--------------+------------------------------------------------------+
| opera_hdf5 | Wrapper to "odim_hdf5" to maintain backward |
| | compatibility with previous pysteps versions. |
+--------------+------------------------------------------------------+
| saf_crri | NetCDF SAF CRRI files containing convective rain |
        |              | rate intensity and other products.                   |
+--------------+------------------------------------------------------+
Exporters:
.. tabularcolumns:: |p{2cm}|L|
+-------------+-------------------------------------------------------+
| Name | Description |
+=============+=======================================================+
| geotiff | Export as GeoTIFF files. |
+-------------+-------------------------------------------------------+
| kineros | KINEROS2 Rainfall file as specified in |
| | https://www.tucson.ars.ag.gov/kineros/. |
| | Grid points are treated as individual rain gauges. |
| | A separate file is produced for each ensemble member. |
+-------------+-------------------------------------------------------+
| netcdf | NetCDF files conforming to the CF 1.7 specification. |
+-------------+-------------------------------------------------------+
method_type: {'importer', 'exporter'}
Type of the method (see tables above).
"""
if isinstance(method_type, str):
method_type = method_type.lower()
else:
raise TypeError(
"Only strings supported for for the method_type"
+ " argument\n"
+ "The available types are: 'importer' and 'exporter'"
) from None
if isinstance(name, str):
name = name.lower()
else:
raise TypeError(
"Only strings supported for the method's names.\n"
+ "Available importers names:"
+ str(list(_importer_methods.keys()))
+ "\nAvailable exporters names:"
+ str(list(_exporter_methods.keys()))
) from None
if method_type == "importer":
methods_dict = _importer_methods
elif method_type == "exporter":
methods_dict = _exporter_methods
else:
raise ValueError(
"Unknown method type {}\n".format(name)
+ "The available types are: 'importer' and 'exporter'"
) from None
try:
return methods_dict[name]
except KeyError:
raise ValueError(
"Unknown {} method {}\n".format(method_type, name)
+ "The available methods are:"
+ str(list(methods_dict.keys()))
) from None
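# A minimal, hedged usage sketch (not part of the original module): the method
# names used below are taken from the tables in the get_method() docstring. The
# guard keeps the sketch from running when the module is imported.
if __name__ == "__main__":
    discover_importers()  # pick up any installed importer plugins first
    import_fmi_pgm = get_method("fmi_pgm", "importer")
    init_netcdf_exporter = get_method("netcdf", "exporter")
    print(import_fmi_pgm.__name__, init_netcdf_exporter.__name__)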
|
|
#!/usr/bin/python
"""
Test some SPARQL queries
"""
import os, os.path
import sys
import re
import shutil
import unittest
import logging
import datetime
import StringIO
import rdflib
# Stand-alone test: don't import from ro_prefixes
prefixes = (
[ ("", "http://example.org/")
, ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
, ("rdfs", "http://www.w3.org/2000/01/rdf-schema#")
, ("owl", "http://www.w3.org/2002/07/owl#")
, ("xsd", "http://www.w3.org/2001/XMLSchema#")
, ("xml", "http://www.w3.org/XML/1998/namespace")
])
turtle_prefixstr = "\n".join([ "@prefix %s: <%s> ."%p for p in prefixes ]) + "\n\n"
sparql_prefixstr = "\n".join([ "PREFIX %s: <%s>"%p for p in prefixes ]) + "\n\n"
class TestSparqlQueries(unittest.TestCase):
# Query test helpers
def doQuery(self, graph, query, format="n3", initBindings=None):
g = rdflib.Graph()
s = StringIO.StringIO(turtle_prefixstr+graph)
g.parse(s, format=format)
# print "----"
# print sparql_prefixstr+query
# print "----"
return g.query(sparql_prefixstr+query, initBindings=initBindings)
def doAskQuery(self, graph, query, expect=True, format="n3", initBindings=None):
r = self.doQuery(graph, query, format, initBindings)
self.assertEqual(r.type, "ASK", "Unexpected query response type: %s"%(r.type))
self.assertEqual(r.askAnswer, expect, "Unexpected query response %s"%(r.askAnswer))
return r.askAnswer
def doSelectQuery(self, graph, query, expect=None, format="n3", initBindings=None):
r = self.doQuery(graph, query, format, initBindings)
self.assertEqual(r.type, "SELECT", "Unexpected query response type: %s"%(r.type))
self.assertEqual(len(r.bindings), expect, "Unexpected number of query matches %d"%(len(r.bindings)))
return r.bindings
# Query tests
def testSimpleSelectQuery(self):
g = """
:s1 :p :o1 .
:s2 :p :o2 .
"""
q = """
SELECT * WHERE { :s2 :p ?o }
"""
r = self.doQuery(g, q)
self.assertEqual(r.type, "SELECT", "Unexpected query response type: %s"%(r.type))
self.assertEqual(len(r.bindings), 1, "Unexpected number of query matches %d"%(len(r.bindings)))
# print "----"
# print repr(r.bindings)
# print "----"
b = r.bindings[0]
self.assertEqual(len(b), 1)
self.assertEqual(b['o'], rdflib.URIRef("http://example.org/o2"))
return
def testDatatypeFilter(self):
g = """
:s1 :p1 "text" .
:s2 :p2 2 .
"""
q1 = """
ASK { :s1 :p1 ?o }
"""
q2 = """
ASK { :s1 :p1 ?o FILTER (datatype(?o) = xsd:string) }
"""
q3 = """
ASK { :s1 :p1 ?o FILTER (datatype(?o) = xsd:integer) }
"""
q4 = """
ASK { :s2 :p2 ?o }
"""
q5 = """
ASK { :s2 :p2 ?o FILTER (datatype(?o) = xsd:string) }
"""
q6 = """
ASK { :s2 :p2 ?o FILTER (datatype(?o) = xsd:integer) }
"""
self.doAskQuery(g, q1, True)
self.doAskQuery(g, q2, True)
self.doAskQuery(g, q3, False)
self.doAskQuery(g, q4, True)
self.doAskQuery(g, q5, False)
self.doAskQuery(g, q6, True)
return
def testIntegerStringFilter(self):
g = """
:s1 :p1 "111" .
:s2 :p2 222 .
:s3 :p3 "notaninteger" .
"""
# Note:
#
# "FILTERs eliminate any solutions that, when substituted into the expression,
# either result in an effective boolean value of false or produce an error"
# -- http://www.w3.org/TR/rdf-sparql-query/#tests.
#
        # Further, the str() of any valid integer is a non-blank string, which in SPARQL
        # yields an effective boolean value (EBV) of True.
# Thus, only valid integer literals should be accepted.
#
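        # For example: for :s1, str(xsd:integer("111")) = "111", a non-blank string,
        # so the FILTER keeps that solution; for :s3, xsd:integer("notaninteger")
        # raises a type error, so the FILTER eliminates it (hence q3 is expected
        # to be False and q3s to return no rows).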
q1 = """
ASK { :s1 :p1 ?value . FILTER ( str(xsd:integer(?value)) ) }
""" ;
q2 = """
ASK { :s2 :p2 ?value . FILTER ( str(xsd:integer(?value)) ) }
""" ;
q3 = """
ASK { :s3 :p3 ?value . FILTER ( str(xsd:integer(?value)) ) }
""" ;
q3s = """
SELECT * WHERE { :s3 :p3 ?value . FILTER ( str(xsd:integer(?value)) ) }
""" ;
self.doAskQuery(g, q1, True)
self.doAskQuery(g, q2, True) # Is this correct?
r = self.doSelectQuery(g, q3s, expect=0)
# print "\n----\n%s\n----"%(repr(r))
self.doAskQuery(g, q3, False)
return
def testRegexFilter(self):
g = """
:s1 :p1 "111" .
:s2 :p2 222 .
:s3 :p3 "notaninteger" .
"""
q1 = """
ASK { :s1 :p1 ?value . FILTER(regex(?value, "^\\\\d+$")) }
""" ;
q2 = """
ASK { :s2 :p2 ?value . FILTER(regex(?value, "^\\\\d+$")) }
""" ;
q3 = """
ASK { :s3 :p3 ?value . FILTER(regex(?value, "^\\\\d+$")) }
""" ;
self.doAskQuery(g, q1, True)
self.doAskQuery(g, q2, False) # Is this correct?
self.doAskQuery(g, q3, False)
return
@unittest.skip("Default test not working")
def testDefaultQuery(self):
g1 = """
:s1 a :test ; rdfs:label "s1" .
"""
g2 = """
:s2 a :test .
"""
q1 = """
SELECT * WHERE
{
?s a :test .
OPTIONAL { ?s rdfs:label ?label }
OPTIONAL { filter(!bound(?label)) BIND(str(?s) as ?label) }
}
"""
r1 = self.doSelectQuery(g1, q1, expect=1)
print "\n----\n%s\n----"%(repr(r1))
self.assertEqual(r1[0]['s'], rdflib.URIRef("http://example.org/s1"))
self.assertEqual(r1[0]['label'], rdflib.Literal("s1"))
r2 = self.doSelectQuery(g2, q1, expect=1)
print "\n----\n%s\n----"%(repr(r2))
self.assertEqual(r2[0]['s'], rdflib.URIRef("http://example.org/s2"))
self.assertEqual(r2[0]['label'], rdflib.Literal("http://example.org/s2"))
return
def testRepeatedValueQuery1(self):
g = """
:s1 a :test1, :test2 ; rdfs:label "s1" .
:s2 a :test3 ; rdfs:seeAlso :s1 .
"""
q1 = """
ASK { ?s a :test1, :test2 ; rdfs:label ?slab }
"""
q2 = """
ASK { ?s a :test1 ; a :test2 ; rdfs:label ?slab }
"""
self.doAskQuery(g, q1, True)
#self.doAskQuery(g, q2, True)
return
def testRepeatedValueQuery2(self):
g = """
:s1 a :test1, :test2 ; rdfs:label "s1" .
:s2 a :test3 ; rdfs:seeAlso :s1 .
"""
q1 = """
ASK { ?s a :test1, :test2 ; rdfs:label ?slab }
"""
q2 = """
ASK { ?s a :test1 ; a :test2 ; rdfs:label ?slab }
"""
#self.doAskQuery(g, q1, True)
self.doAskQuery(g, q2, True)
return
# Related tests
def testLiteralCompare(self):
self.assertEqual(rdflib.Literal("def").value, "def")
lit111 = rdflib.Literal("111", datatype=rdflib.URIRef("http://www.w3.org/2001/XMLSchema#integer"))
self.assertEqual(lit111.value, 111)
return
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=1)
tests = unittest.TestSuite()
tests.addTest(TestSparqlQueries("testSimpleSelectQuery"))
tests.addTest(TestSparqlQueries("testDatatypeFilter"))
tests.addTest(TestSparqlQueries("testIntegerStringFilter"))
tests.addTest(TestSparqlQueries("testRegexFilter"))
tests.addTest(TestSparqlQueries("testDefaultQuery"))
tests.addTest(TestSparqlQueries("testRepeatedValueQuery1"))
tests.addTest(TestSparqlQueries("testRepeatedValueQuery2"))
tests.addTest(TestSparqlQueries("testLiteralCompare"))
runner.run(tests)
# End.
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from nova.api.openstack.compute import consoles
from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
def __init__(self):
self.instances_by_id = {}
self.ids_by_uuid = {}
self.max_id = 0
def return_server_by_id(self, context, id):
if id not in self.instances_by_id:
self._add_server(id=id)
return dict(self.instances_by_id[id])
def return_server_by_uuid(self, context, uuid):
if uuid not in self.ids_by_uuid:
self._add_server(uuid=uuid)
return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
def _add_server(self, id=None, uuid=None):
if id is None:
id = self.max_id + 1
if uuid is None:
uuid = str(utils.gen_uuid())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
if id > self.max_id:
self.max_id = id
def stub_instance(id, user_id='fake', project_id='fake', host=None,
vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0):
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": key_name,
"key_data": key_data,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"root_gb": 0,
"hostname": "",
"host": host,
"instance_type": {},
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": utils.utcnow(),
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"availability_zone": "",
"display_name": server_name,
"display_description": "",
"locked": False,
"metadata": [],
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress}
return instance
class ConsolesControllerTest(test.TestCase):
def setUp(self):
super(ConsolesControllerTest, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
self.stubs.Set(db, 'instance_get',
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
self.uuid = str(utils.gen_uuid())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self.controller = consoles.Controller()
def test_create_console(self):
def fake_create_console(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
return {}
self.stubs.Set(console.api.API, 'create_console', fake_create_console)
req = fakes.HTTPRequest.blank(self.url)
self.controller.create(req, self.uuid)
def test_show_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool, instance_name='inst-0001')
expected = {'console': {'id': 20,
'port': 'fake_port',
'host': 'fake_hostname',
'password': 'fake_password',
'instance_name': 'inst-0001',
'console_type': 'fake_type'}}
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
self.assertDictMatch(res_dict, expected)
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_show_console_unknown_instance(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_list_consoles(self):
def fake_get_consoles(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
pool1 = dict(console_type='fake_type',
public_hostname='fake_hostname')
cons1 = dict(id=10, password='fake_password',
port='fake_port', pool=pool1)
pool2 = dict(console_type='fake_type2',
public_hostname='fake_hostname2')
cons2 = dict(id=11, password='fake_password2',
port='fake_port2', pool=pool2)
return [cons1, cons2]
expected = {'consoles':
[{'console': {'id': 10, 'console_type': 'fake_type'}},
{'console': {'id': 11, 'console_type': 'fake_type2'}}]}
self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
self.assertDictMatch(res_dict, expected)
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool)
def fake_delete_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.controller.delete(req, self.uuid, '20')
def test_delete_console_unknown_console(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
def test_delete_console_unknown_instance(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
class TestConsolesXMLSerializer(test.TestCase):
def test_show(self):
fixture = {'console': {'id': 20,
'password': 'fake_password',
'port': 'fake_port',
'host': 'fake_hostname',
'console_type': 'fake_type'}}
output = consoles.ConsoleTemplate().serialize(fixture)
res_tree = etree.XML(output)
self.assertEqual(res_tree.tag, 'console')
self.assertEqual(res_tree.xpath('id')[0].text, '20')
self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')
def test_index(self):
fixture = {'consoles': [{'console': {'id': 10,
'console_type': 'fake_type'}},
{'console': {'id': 11,
'console_type': 'fake_type2'}}]}
output = consoles.ConsolesTemplate().serialize(fixture)
res_tree = etree.XML(output)
self.assertEqual(res_tree.tag, 'consoles')
self.assertEqual(len(res_tree), 2)
self.assertEqual(res_tree[0].tag, 'console')
self.assertEqual(res_tree[1].tag, 'console')
self.assertEqual(len(res_tree[0]), 1)
self.assertEqual(res_tree[0][0].tag, 'console')
self.assertEqual(len(res_tree[1]), 1)
self.assertEqual(res_tree[1][0].tag, 'console')
self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
'fake_type')
self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
'fake_type2')
|
|
class Vakai(In.entity.Entity):
'''Vakai Entity class.'''
def __init__(self, data = None, items = None, **args):
self.weight = 0
super().__init__(data, items, **args)
self.css.append(self.__type__)
@IN.register('Vakai', type = 'Entitier')
class VakaiEntitier(In.entity.EntityEntitier):
'''Base Vakai Entitier'''
# Vakai needs entity insert/update/delete hooks
invoke_entity_hook = True
# load all is very heavy
entity_load_all = True
parent_field_name = 'field_vakai_parent'
title_field_name = 'field_vakai_title'
def entity_context_links(self, entity, context_type):
entitier = IN.entitier
# no view access
if not entitier.access('view', entity):
return
id_suffix = '-'.join((entity.__type__, str(entity.id)))
output = super().entity_context_links(entity, context_type)
if context_type == 'links':
if entitier.access('edit', entity):
edit = Object.new(type = 'Link', data = {
'id' : 'edit-link-' + id_suffix,
'css' : ['i-button i-button-small'],
'value' : s('edit'),
'href' : '/'.join(('/vakai', str(entity.id), 'edit')),
'weight' : 0,
})
output[edit.id] = edit
if entitier.access('delete', entity):
delete = Object.new(type = 'Link', data = {
'id' : 'delete-link-' + id_suffix,
'css' : ['ajax-modal i-button i-button-small'],
'value' : s('delete'),
'href' : '/'.join(('/vakai', str(entity.id), 'delete', 'confirm')),
'weight' : 1,
})
output[delete.id] = delete
if entitier.access('add', entity):
try:
bundle = entity.type
reply = Object.new(type = 'TextDiv', data = {
'id' : 'add_sub-link-' + id_suffix,
'css' : ['i-button i-button-small'],
'value' : s('Add sub vakai'),
'weight' : 3,
'attributes' : {
'data-ajax_type' : 'POST',
'data-href' : '/vakai/add/sub/!' + '/'.join((bundle, str(entity.id))),
},
})
output[reply.id] = reply
except Exception:
IN.logger.debug()
return output
def delete(self, entity, commit = True):
        '''Recursively delete a vakai and its sub vakais'''
        #result = entity.Model.delete(entity, commit)
        # Instead of deleting this vakai and all of its sub vakais,
        # just disable this vakai only.
try:
db = IN.db
connection = db.connection
entity.Model.delete(entity, commit)
cursor = db.execute('''SELECT
field_vakai_parent.value
FROM
config.vakai,
field.field_vakai_parent
WHERE
vakai.id = field_vakai_parent.entity_id AND
vakai.id = %(parent_id)s AND
vakai.status > 0 AND
field_vakai_parent.value > 0
''', {
'parent_id' : entity.id,
})
ids = []
last_id = 0
if cursor.rowcount >= 0:
for row in cursor:
# reverse reference
ids.append(row['value'])
sub_loaded = None
if ids:
sub_loaded = self.load_multiple(self, ids)
for id, sub_entity in sub_loaded.items():
# recursive delete
sub_entity.Entitier.delete(sub_entity, commit)
# hook invoke
if self.invoke_entity_hook:
IN.hook_invoke('_'.join(('entity_delete', entity.__type__, entity.type)), entity)
IN.hook_invoke('entity_delete_' + entity.__type__, entity)
# heavy. dont implement
IN.hook_invoke('__entity_delete__', entity)
# clear the cache
cacher = self.cacher
cacher.remove(entity.id)
if self.entity_load_all:
cacher.remove('all')
if self.entity_load_all_by_bundle:
self.cacher.remove(entity.type)
return True
except Exception as e:
IN.logger.debug()
return False
#-----------------------------------------------------------------------
# Vakai Model
#-----------------------------------------------------------------------
@IN.register('Vakai', type = 'Model')
class VakaiModel(In.entity.EntityModel):
'''Vakai Model'''
not_updatable_columns = ['id', 'type', 'created']
status_deleted = -1
def delete(self, entity, commit = True):
        '''Recursively delete a vakai and its sub vakais'''
if not entity.id:
return
        # Instead of deleting this Vakai and all of its sub Vakai,
        # just disable this Vakai only.
# TODO: sub vakai delete
connection = IN.db.connection
try:
table_info = self.model['table']
table = table_info['name']
columns = table_info['columns']
cursor = IN.db.update({
'table' : table,
'set' : [['status', self.status_deleted]],
'where' : ['id', int(entity.id)]
}).execute()
if commit: # if caller will not handle the commit
connection.commit()
except Exception as e:
if commit:
connection.rollback()
IN.logger.debug()
raise e # re raise
return True
@IN.hook
def entity_model():
return {
'Vakai' : { # entity name
'table' : { # table
'name' : 'vakai',
'columns' : { # table columns / entity attributes
'id' : {},
'type' : {},
'created' : {},
'status' : {},
'nabar_id' : {},
'weight' : {},
},
'keys' : {
'primary' : 'id',
},
},
},
}
@IN.register('Vakai', type = 'Themer')
class VakaiThemer(In.entity.EntityThemer):
'''Vakai themer'''
def view_modes(self):
modes = super().view_modes()
modes.add('tag')
return modes
def theme(self, obj, format, view_mode, args):
obj.css.append('vakai-' + str(obj.id))
super().theme(obj, format, view_mode, args)
def theme_process_variables(self, obj, format, view_mode, args):
if args['context'].request.ajax:
# add sub list
data = {
'lazy_args' : {
'load_args' : {
'data' : {
'parent_entity_type' : obj.__type__, # always should be Vakai
'parent_entity_bundle' : obj.type,
'parent_entity_id' : obj.id, # parent
},
},
},
'parent_entity_type' : obj.__type__, # always should be Vakai
'parent_entity_bundle' : obj.type,
'parent_entity_id' : obj.id, # parent
}
sub_list = Object.new(type = 'VakaiListLazy', data = data)
args['sub_list'] = IN.themer.theme(sub_list)
super().theme_process_variables(obj, format, view_mode, args)
##nabar = args['context'].nabar
#if obj.nabar_id:
#nabar = IN.entitier.load('Nabar', obj.nabar_id)
#args['nabar_name'] = nabar.name
#args['nabar_id'] = nabar.id
#args['nabar_picture'] = IN.nabar.nabar_profile_picture_themed(nabar)
|
|
#!/usr/bin/env python
# Copyright (c) 2015, Andre Lucas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import inspect
import shutil
import unittest
from hsync.exceptions import *
from hsync.filehash import *
from hsync.hashlist import *
from hsync.hashlist_sqlite import *
from hsync.idmapper import *
class HashListTestCase(unittest.TestCase):
me = inspect.getfile(inspect.currentframe())
topdir = os.path.join(os.path.dirname(me), 'test')
mapper = UidGidMapper()
all_impl = HashList, SqliteHashList
@classmethod
def setUpClass(self):
self.user = self.mapper.get_name_for_uid(os.getuid())
self.group = self.mapper.get_group_for_gid(os.getgid())
def tearDown(self):
if hasattr(self, 'tmp') and self.tmp:
shutil.rmtree(self.tmp, True)
def test_init(self):
'''Object creation tests'''
for T in self.all_impl:
log.warning("XXX T %s", T)
hl = T()
self.assertIsNotNone(hl, "Non-null object returned")
def test_append(self):
'''Object can be appended'''
for T in self.all_impl:
hl = T()
fh = FileHash.init_from_string("0 100644 %s %s 0 0 test" %
(self.user, self.group))
# This should not raise.
hl.append(fh)
# Attempting to append a non-FileHash should raise.
with self.assertRaises(NotAFileHashError):
hl.append(None)
with self.assertRaises(NotAFileHashError):
hl.append("Nope")
def test_duplicate_raise(self):
'''Check exceptions are properly raised'''
for T in self.all_impl:
hl = T(raise_on_duplicates=True)
fh = FileHash.init_from_string("0 100644 %s %s 0 0 test" %
(self.user, self.group))
# This should not raise.
hl.append(fh)
self.assertEqual(len(hl), 1)
# Duplicate should raise.
with self.assertRaises(DuplicateEntryInHashListError):
hl.append(fh)
            # With raise_on_duplicates=False, duplicates should be accepted without raising.
hl = T(raise_on_duplicates=False)
fh = FileHash.init_from_string("0 100644 %s %s 0 0 test" %
(self.user, self.group))
# This should not raise.
hl.append(fh)
# Duplicate should not raise.
hl.append(fh)
self.assertEqual(len(hl), 2)
def test_list_iterator(self):
'''Check we can iterate over the list properly'''
for T in self.all_impl:
hl = T()
fhlist = []
pfx = "0 100644 %s %s 0 0 test" % (self.user, self.group)
for n in xrange(1000):
fh = FileHash.init_from_string(pfx + '%0.3i' % n)
fhlist.append(fh)
hl.extend(fhlist)
self.assertEqual(len(hl), len(fhlist))
for n, fh in enumerate(hl):
self.assertEqual(fh, fhlist[n])
def test_list_indexing(self):
'''Check we can index the list properly'''
for T in self.all_impl:
hl = T()
fhlist = []
pfx = "0 100644 %s %s 0 0 test" % (self.user, self.group)
for n in xrange(1000):
fh = FileHash.init_from_string(pfx + '%0.3i' % n)
fhlist.append(fh)
hl.extend(fhlist)
self.assertEqual(len(hl), len(fhlist))
for n in xrange(1000):
self.assertEqual(hl[n], fhlist[n])
class HashDictTestCase(unittest.TestCase):
me = inspect.getfile(inspect.currentframe())
topdir = os.path.join(os.path.dirname(me), 'test')
mapper = UidGidMapper()
all_impl = HashList, SqliteHashList
@classmethod
def setUpClass(self):
self.user = self.mapper.get_name_for_uid(os.getuid())
self.group = self.mapper.get_group_for_gid(os.getgid())
def tearDown(self):
if hasattr(self, 'tmp') and self.tmp:
shutil.rmtree(self.tmp, True)
def test_init(self):
'''Object creation tests'''
with self.assertRaises(InitialiserNotAHashListError):
HashDict()
for T in self.all_impl:
hl = T()
HashDict(hl)
def test_lookup(self):
'''Check objects can be looked up'''
for T in self.all_impl:
hl = T()
fh = FileHash.init_from_string("0 100644 %s %s 0 0 test" %
(self.user, self.group))
# This should not raise.
hl.append(fh)
hd = HashDict(hl)
nfh = hd[fh.fpath]
self.assertIsInstance(nfh, FileHash, 'HashDict returns FileHash')
self.assertEqual(fh, nfh, 'HashDict returns same FileHash')
def test_multi_lookup(self):
'''Check we can index the dict properly'''
for T in self.all_impl:
hl = T()
fhlist = []
pfx = "0 100644 %s %s 0 0 test" % (self.user, self.group)
for n in xrange(1000):
fh = FileHash.init_from_string(pfx + '%0.3i' % n)
fhlist.append(fh)
hl.extend(fhlist)
self.assertEqual(len(hl), len(fhlist))
hd = HashDict(hl)
for fh in fhlist:
nfh = hd[fh.fpath]
self.assertEqual(fh, nfh, 'Path-based lookup works')
def test_iter(self):
'''Check we can index the dict properly'''
for T in self.all_impl:
hl = T()
fhlist = []
pfx = "0 100644 %s %s 0 0 test" % (self.user, self.group)
for n in xrange(1000):
fh = FileHash.init_from_string(pfx + '%0.3i' % n)
fhlist.append(fh)
hl.extend(fhlist)
self.assertEqual(len(hl), len(fhlist))
hd = HashDict(hl)
# Could use enumerate() below, but it makes a mess of the already-
# messy generator expression used to get sorted keys.
curfile = 0
for k, v in ((k, hd[k]) for k in sorted(hd.iterkeys())):
self.assertIsInstance(k, str, 'Key is correct type')
self.assertIsInstance(v, FileHash, 'Value is correct type')
self.assertEqual(v.fpath, 'test%0.3i' % curfile,
'Correct file returned')
curfile += 1
print(k, v)
def test_sort(self):
'''Check the keys are in path order'''
for T in self.all_impl:
pfx = "0 100644 %s %s 0 0 " % (self.user, self.group)
pathlist = [
['a', 'a/1', 'a/2', 'b'],
['b', 'a/1', 'a/2', 'a', 'c'],
['z', 'y', 'x', 'w'],
]
for paths in pathlist:
hl = T()
sorted_paths = sorted(paths)
fhlist = []
for f in paths:
fh = FileHash.init_from_string(pfx + f)
fhlist.append(fh)
hl.extend(fhlist)
fd = HashDict(hl)
for n, (k, fh) in enumerate(fd.iteritems()):
# The dict keys are the path.
self.assertEqual(k, sorted_paths[n])
# Check the object as well, for good measure.
self.assertEqual(fh.fpath, sorted_paths[n])
|
|
# coding:utf-8
from base import TestBase
class TestDefault(TestBase):
"""Default tests"""
# for debug
# def tearDown(self):
# pass
insert_position_text = """
# Heading 0
# Heading 1
...
## Heading 2
...
## Heading 3
...
# Heading with anchor [with-anchor]
...
"""
def test_before_than_TOC_should_be_ignored(self):
toc = self.init_insert(self.insert_position_text, 13)
self.assert_NotIn("Heading 0", toc)
def test_after_than_TOC_should_be_included(self):
toc = self.init_insert(self.insert_position_text, 13)
self.assert_In("Heading 1", toc)
self.assert_In("Heading 2", toc)
self.assert_In("Heading 3", toc)
self.assert_In("Heading with anchor", toc)
def test_ignore_inside_codeblock(self):
text = """
# heading1
```
# heading2
```
```markdown
# heading3
```
```
# heading4
# heading5
```
"""
toc = self.init_insert(text)
self.assert_In("heading1", toc)
self.assert_NotIn("heading3", toc)
self.assert_NotIn("heading2", toc)
self.assert_NotIn("heading4", toc)
self.assert_NotIn("heading5", toc)
def test_ignore_inside_codeblock_alt(self):
text = """
# heading1
~~~
# heading2
~~~
~~~markdown
# heading3
~~~
~~~
# heading4
# heading5
~~~
"""
toc = self.init_insert(text)
self.assert_In("heading1", toc)
self.assert_NotIn("heading3", toc)
self.assert_NotIn("heading2", toc)
self.assert_NotIn("heading4", toc)
self.assert_NotIn("heading5", toc)
def test_escape_link(self):
text = """
# This [link](http://sample.com/) is cool
"""
toc = self.init_insert(text)
self.assert_In("This link is cool", toc)
def test_escape_brackets(self):
"""Broken reference when header has square brackets
https://github.com/naokazuterada/MarkdownTOC/issues/57
"""
text = """
# function(foo[, bar])
"""
toc = self.init_insert(text)
self.assert_In("function\(foo\[, bar\]\)", toc)
def test_spaces_in_atx_heading(self):
text = """
#Heading 0
# Heading 1
"""
toc = self.init_insert(text)
self.assert_In("- Heading 0", toc)
self.assert_In("- Heading 1", toc)
def test_remove_atx_closing_seq(self):
""" Remove closing sequence of # characters"""
text = """
# Heading 0 #
## Heading 1 ###
# Heading 2 ##########
## Heading 3
"""
toc = self.init_insert(text)
self.assert_In("Heading 0\n", toc)
self.assert_In("Heading 1\n", toc)
self.assert_In("Heading 2\n", toc)
def test_id_replacement(self):
""" Reoplace chars(or string) in id_replacements object in id string"""
text = """
<!-- MarkdownTOC autolink=true -->
<!-- /MarkdownTOC -->
# Heading ! 0
# Heading # 1
# Heading !! 2
# Heading &and&and& 3
# <element1>
# <element2>
"""
toc = self.init_update(text)["toc"]
self.assert_In("- [Heading ! 0](#heading--0)", toc)
self.assert_In("- [Heading # 1](#heading--1)", toc)
self.assert_In("- [Heading !! 2](#heading--2)", toc)
self.assert_In("- [Heading &and&and& 3](#heading-andand-3)", toc)
self.assert_In("- [<element1>](#element1)", toc)
self.assert_In("- [<element2>](#element2)", toc)
def test_no_escape_in_code(self):
""" No escape in codeblock"""
text = """
<!-- MarkdownTOC -->
<!-- /MarkdownTOC -->
# `function(param, [optional])`
# (a static function) `greet([name])` (original, right?)
# `add(keys, command[, args][, context])`
# `get_context(key[, operator][, operand][, match_all])`
"""
toc = self.init_update(text)["toc"]
self.assert_In("- `function(param, [optional])`", toc)
self.assert_In(
"- \\(a static function\\) `greet([name])` \\(original, right?\\)", toc
)
self.assert_In("- `add(keys, command[, args][, context])`", toc)
self.assert_In("- `get_context(key[, operator][, operand][, match_all])`", toc)
def test_no_escape_in_code_with_link(self):
""" No escape in codeblock (with link)"""
text = """
<!-- MarkdownTOC autolink=true -->
<!-- /MarkdownTOC -->
# `function(param, [optional])`
# (a static function) `greet([name])` (original, right?)
# `add(keys, command[, args][, context])`
# `get_context(key[, operator][, operand][, match_all])`
"""
toc = self.init_update(text)["toc"]
self.assert_In(
"- [`function(param, [optional])`](#functionparam-optional)", toc
)
self.assert_In(
"- [\\(a static function\\) `greet([name])` \\(original, right?\\)](#a-static-function-greetname-original-right)",
toc,
)
self.assert_In(
"- [`add(keys, command[, args][, context])`](#addkeys-command-args-context)",
toc,
)
self.assert_In(
"- [`get_context(key[, operator][, operand][, match_all])`](#get_contextkey-operator-operand-match_all)",
toc,
)
def test_no_headings(self):
""" No headings there"""
text = """
<!-- MarkdownTOC autolink=true -->
<!-- /MarkdownTOC -->
# `function(param, [optional])`
# (a static function) `greet([name])` (original, right?)
# `add(keys, command[, args][, context])`
# `get_context(key[, operator][, operand][, match_all])`
"""
toc = self.init_update(text)["toc"]
self.assert_NotIn("^- ", toc)
def test_uniquify_id_1(self):
""" uniquify id if there are same text headings"""
text = """
<!-- MarkdownTOC autolink=true -->
<!-- /MarkdownTOC -->
# Heading
# Heading
# Heading
"""
toc = self.init_update(text)["toc"]
self.assert_In("- [Heading](#heading)", toc)
self.assert_In("- [Heading](#heading-1)", toc)
self.assert_In("- [Heading](#heading-2)", toc)
def test_uniquify_id_2(self):
""" handle = or - headings"""
text = """
<!-- MarkdownTOC autolink=true indent=" " -->
<!-- /MarkdownTOC -->
Heading 1
=======
Heading 2
-------
"""
toc = self.init_update(text)["toc"]
self.assert_In("- [Heading 1](#heading-1)", toc)
self.assert_In(" - [Heading 2](#heading-2)", toc)
def test_whitespace_in_begining(self):
"""Ignore images in heading"""
text = """
<!-- MarkdownTOC -->
<!-- /MarkdownTOC -->
# Heading
# Heading
# Heading
"""
toc = self.init_update(text)["toc"]
self.assert_In("- Heading", toc)
self.assert_NotIn("- Heading", toc)
self.assert_NotIn("- Heading", toc)
def test_image_in_heading(self):
"""Ignore images in heading"""
text = """
<!-- MarkdownTOC -->
<!-- /MarkdownTOC -->
#  Heading
# Image in sentence
"""
toc = self.init_update(text)["toc"]
self.assert_In("- Heading", toc)
self.assert_In("- Image in sentence", toc)
|
|
import numpy as np
import pytest
import dask.array as da
from dask.array.utils import assert_eq, same_keys, AxisError, IS_NEP18_ACTIVE
from dask.array.gufunc import apply_gufunc
from dask.sizeof import sizeof
cupy = pytest.importorskip("cupy")
functions = [
lambda x: x,
lambda x: da.expm1(x),
lambda x: 2 * x,
lambda x: x / 2,
lambda x: x ** 2,
lambda x: x + x,
lambda x: x * x,
lambda x: x[0],
lambda x: x[:, 1],
lambda x: x[:1, None, 1:3],
lambda x: x.T,
lambda x: da.transpose(x, (1, 2, 0)),
lambda x: x.sum(),
pytest.param(
lambda x: x.mean(),
marks=pytest.mark.xfail(
reason="requires NumPy>=1.17 and CuPy support for shape argument in *_like functions."
),
),
pytest.param(
lambda x: x.moment(order=0),
marks=pytest.mark.xfail(reason="see https://github.com/dask/dask/issues/4875"),
),
lambda x: x.moment(order=2),
pytest.param(
lambda x: x.std(),
marks=pytest.mark.xfail(
reason="requires NumPy>=1.17 and CuPy support for shape argument in *_like functions."
),
),
pytest.param(
lambda x: x.var(),
marks=pytest.mark.xfail(
reason="requires NumPy>=1.17 and CuPy support for shape argument in *_like functions."
),
),
pytest.param(
lambda x: x.dot(np.arange(x.shape[-1])),
marks=pytest.mark.xfail(reason="cupy.dot(numpy) fails"),
),
pytest.param(
lambda x: x.dot(np.eye(x.shape[-1])),
marks=pytest.mark.xfail(reason="cupy.dot(numpy) fails"),
),
pytest.param(
lambda x: da.tensordot(x, np.ones(x.shape[:2]), axes=[(0, 1), (0, 1)]),
marks=pytest.mark.xfail(reason="cupy.dot(numpy) fails"),
),
lambda x: x.sum(axis=0),
lambda x: x.max(axis=0),
lambda x: x.sum(axis=(1, 2)),
lambda x: x.astype(np.complex128),
lambda x: x.map_blocks(lambda x: x * 2),
pytest.param(
lambda x: x.round(1),
marks=pytest.mark.xfail(reason="cupy doesn't support round"),
),
lambda x: x.reshape((x.shape[0] * x.shape[1], x.shape[2])),
# Rechunking here is required, see https://github.com/dask/dask/issues/2561
lambda x: (x.rechunk(x.shape)).reshape((x.shape[1], x.shape[0], x.shape[2])),
lambda x: x.reshape((x.shape[0], x.shape[1], x.shape[2] / 2, x.shape[2] / 2)),
lambda x: abs(x),
lambda x: x > 0.5,
lambda x: x.rechunk((4, 4, 4)),
lambda x: x.rechunk((2, 2, 1)),
pytest.param(
lambda x: da.einsum("ijk,ijk", x, x),
marks=pytest.mark.xfail(
reason="depends on resolution of https://github.com/numpy/numpy/issues/12974"
),
),
lambda x: np.isneginf(x),
lambda x: np.isposinf(x),
pytest.param(
lambda x: np.isreal(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.iscomplex(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.real(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.imag(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.fix(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.i0(x.reshape((24,))),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.sinc(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
pytest.param(
lambda x: np.nan_to_num(x),
marks=pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
),
),
]
@pytest.mark.parametrize("func", functions)
def test_basic(func):
c = cupy.random.random((2, 3, 4))
n = c.get()
dc = da.from_array(c, chunks=(1, 2, 2), asarray=False)
dn = da.from_array(n, chunks=(1, 2, 2))
ddc = func(dc)
ddn = func(dn)
assert type(ddc._meta) == cupy.core.core.ndarray
assert_eq(ddc, ddc) # Check that _meta and computed arrays match types
assert_eq(ddc, ddn)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
def test_sizeof(dtype):
c = cupy.random.random((2, 3, 4), dtype=dtype)
assert sizeof(c) == c.nbytes
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_diag():
v = cupy.arange(11)
dv = da.from_array(v, chunks=(4,), asarray=False)
assert type(dv._meta) == cupy.core.core.ndarray
assert_eq(dv, dv) # Check that _meta and computed arrays match types
assert_eq(da.diag(dv), cupy.diag(v))
v = v + v + 3
dv = dv + dv + 3
darr = da.diag(dv)
cupyarr = cupy.diag(v)
assert type(darr._meta) == cupy.core.core.ndarray
assert_eq(darr, darr) # Check that _meta and computed arrays match types
assert_eq(darr, cupyarr)
x = cupy.arange(64).reshape((8, 8))
dx = da.from_array(x, chunks=(4, 4), asarray=False)
assert type(dx._meta) == cupy.core.core.ndarray
assert_eq(dx, dx) # Check that _meta and computed arrays match types
assert_eq(da.diag(dx), cupy.diag(x))
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_diagonal():
v = cupy.arange(11)
with pytest.raises(ValueError):
da.diagonal(v)
v = cupy.arange(4).reshape((2, 2))
with pytest.raises(ValueError):
da.diagonal(v, axis1=0, axis2=0)
with pytest.raises(AxisError):
da.diagonal(v, axis1=-4)
with pytest.raises(AxisError):
da.diagonal(v, axis2=-4)
v = cupy.arange(4 * 5 * 6).reshape((4, 5, 6))
v = da.from_array(v, chunks=2, asarray=False)
assert_eq(da.diagonal(v), np.diagonal(v))
# Empty diagonal.
assert_eq(da.diagonal(v, offset=10), np.diagonal(v, offset=10))
assert_eq(da.diagonal(v, offset=-10), np.diagonal(v, offset=-10))
assert isinstance(da.diagonal(v).compute(), cupy.core.core.ndarray)
with pytest.raises(ValueError):
da.diagonal(v, axis1=-2)
# Negative axis.
assert_eq(da.diagonal(v, axis1=-1), np.diagonal(v, axis1=-1))
assert_eq(da.diagonal(v, offset=1, axis1=-1), np.diagonal(v, offset=1, axis1=-1))
    # Heterogeneous chunks.
v = cupy.arange(2 * 3 * 4 * 5 * 6).reshape((2, 3, 4, 5, 6))
v = da.from_array(
v, chunks=(1, (1, 2), (1, 2, 1), (2, 1, 2), (5, 1)), asarray=False
)
assert_eq(da.diagonal(v), np.diagonal(v))
assert_eq(
da.diagonal(v, offset=2, axis1=3, axis2=1),
np.diagonal(v, offset=2, axis1=3, axis2=1),
)
assert_eq(
da.diagonal(v, offset=-2, axis1=3, axis2=1),
np.diagonal(v, offset=-2, axis1=3, axis2=1),
)
assert_eq(
da.diagonal(v, offset=-2, axis1=3, axis2=4),
np.diagonal(v, offset=-2, axis1=3, axis2=4),
)
assert_eq(da.diagonal(v, 1), np.diagonal(v, 1))
assert_eq(da.diagonal(v, -1), np.diagonal(v, -1))
# Positional arguments
assert_eq(da.diagonal(v, 1, 2, 1), np.diagonal(v, 1, 2, 1))
@pytest.mark.xfail(reason="no shape argument support *_like functions on CuPy yet")
@pytest.mark.skipif(
np.__version__ < "1.17", reason="no shape argument for *_like functions"
)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_tril_triu():
A = cupy.random.randn(20, 20)
for chk in [5, 4]:
dA = da.from_array(A, (chk, chk), asarray=False)
assert_eq(da.triu(dA), np.triu(A))
assert_eq(da.tril(dA), np.tril(A))
for k in [-25, -20, -9, -1, 1, 8, 19, 21]:
assert_eq(da.triu(dA, k), np.triu(A, k))
assert_eq(da.tril(dA, k), np.tril(A, k))
@pytest.mark.xfail(reason="no shape argument support *_like functions on CuPy yet")
@pytest.mark.skipif(
np.__version__ < "1.17", reason="no shape argument for *_like functions"
)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_tril_triu_non_square_arrays():
A = cupy.random.randint(0, 11, (30, 35))
dA = da.from_array(A, chunks=(5, 5), asarray=False)
assert_eq(da.triu(dA), np.triu(A))
assert_eq(da.tril(dA), np.tril(A))
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_apply_gufunc_axis():
def mydiff(x):
return np.diff(x)
a = cupy.random.randn(3, 6, 4)
da_ = da.from_array(a, chunks=2, asarray=False)
m = np.diff(a, axis=1)
dm = apply_gufunc(
mydiff, "(i)->(i)", da_, axis=1, output_sizes={"i": 5}, allow_rechunk=True
)
assert_eq(m, dm)
def test_overlap_internal():
x = cupy.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4), asarray=False)
g = da.overlap.overlap_internal(d, {0: 2, 1: 1})
assert g.chunks == ((6, 6), (5, 5))
expected = np.array(
[
[0, 1, 2, 3, 4, 3, 4, 5, 6, 7],
[8, 9, 10, 11, 12, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
[16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 59, 60, 61, 62, 63],
]
)
assert_eq(g, expected)
assert same_keys(da.overlap.overlap_internal(d, {0: 2, 1: 1}), g)
def test_trim_internal():
x = cupy.ones((40, 60))
d = da.from_array(x, chunks=(10, 10), asarray=False)
e = da.overlap.trim_internal(d, axes={0: 1, 1: 2})
assert e.chunks == ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_periodic():
x = cupy.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4), asarray=False)
e = da.overlap.periodic(d, axis=0, depth=2)
assert e.shape[0] == d.shape[0] + 4
assert e.shape[1] == d.shape[1]
assert_eq(e[1, :], d[-1, :])
assert_eq(e[0, :], d[-2, :])
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_reflect():
x = cupy.arange(10)
d = da.from_array(x, chunks=(5, 5), asarray=False)
e = da.overlap.reflect(d, axis=0, depth=2)
expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])
assert_eq(e, expected)
e = da.overlap.reflect(d, axis=0, depth=1)
expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
assert_eq(e, expected)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_nearest():
x = cupy.arange(10)
d = da.from_array(x, chunks=(5, 5), asarray=False)
e = da.overlap.nearest(d, axis=0, depth=2)
expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])
assert_eq(e, expected)
e = da.overlap.nearest(d, axis=0, depth=1)
expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
assert_eq(e, expected)
@pytest.mark.xfail(reason="no shape argument support *_like functions on CuPy yet")
@pytest.mark.skipif(
np.__version__ < "1.17", reason="no shape argument for *_like functions"
)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_constant():
x = cupy.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4), asarray=False)
e = da.overlap.constant(d, axis=0, depth=2, value=10)
assert e.shape[0] == d.shape[0] + 4
assert e.shape[1] == d.shape[1]
assert_eq(e[1, :], np.ones(8, dtype=x.dtype) * 10)
assert_eq(e[-1, :], np.ones(8, dtype=x.dtype) * 10)
@pytest.mark.xfail(reason="no shape argument support *_like functions on CuPy yet")
@pytest.mark.skipif(
np.__version__ < "1.17", reason="no shape argument for *_like functions"
)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_boundaries():
x = cupy.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4), asarray=False)
e = da.overlap.boundaries(d, {0: 2, 1: 1}, {0: 0, 1: "periodic"})
expected = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[7, 0, 1, 2, 3, 4, 5, 6, 7, 0],
[15, 8, 9, 10, 11, 12, 13, 14, 15, 8],
[23, 16, 17, 18, 19, 20, 21, 22, 23, 16],
[31, 24, 25, 26, 27, 28, 29, 30, 31, 24],
[39, 32, 33, 34, 35, 36, 37, 38, 39, 32],
[47, 40, 41, 42, 43, 44, 45, 46, 47, 40],
[55, 48, 49, 50, 51, 52, 53, 54, 55, 48],
[63, 56, 57, 58, 59, 60, 61, 62, 63, 56],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
assert_eq(e, expected)
def test_random_all():
def rnd_test(func, *args, **kwargs):
a = func(*args, **kwargs)
assert type(a._meta) == cupy.core.core.ndarray
assert_eq(a, a) # Check that _meta and computed arrays match types
rs = da.random.RandomState(RandomState=cupy.random.RandomState)
rnd_test(rs.beta, 1, 2, size=5, chunks=3)
rnd_test(rs.binomial, 10, 0.5, size=5, chunks=3)
rnd_test(rs.chisquare, 1, size=5, chunks=3)
rnd_test(rs.exponential, 1, size=5, chunks=3)
rnd_test(rs.f, 1, 2, size=5, chunks=3)
rnd_test(rs.gamma, 5, 1, size=5, chunks=3)
rnd_test(rs.geometric, 1, size=5, chunks=3)
rnd_test(rs.gumbel, 1, size=5, chunks=3)
rnd_test(rs.hypergeometric, 1, 2, 3, size=5, chunks=3)
rnd_test(rs.laplace, size=5, chunks=3)
rnd_test(rs.logistic, size=5, chunks=3)
rnd_test(rs.lognormal, size=5, chunks=3)
rnd_test(rs.logseries, 0.5, size=5, chunks=3)
# No RandomState for multinomial in CuPy
# rnd_test(rs.multinomial, 20, [1 / 6.] * 6, size=5, chunks=3)
rnd_test(rs.negative_binomial, 5, 0.5, size=5, chunks=3)
rnd_test(rs.noncentral_chisquare, 2, 2, size=5, chunks=3)
rnd_test(rs.noncentral_f, 2, 2, 3, size=5, chunks=3)
rnd_test(rs.normal, 2, 2, size=5, chunks=3)
rnd_test(rs.pareto, 1, size=5, chunks=3)
rnd_test(rs.poisson, size=5, chunks=3)
rnd_test(rs.power, 1, size=5, chunks=3)
rnd_test(rs.rayleigh, size=5, chunks=3)
rnd_test(rs.random_sample, size=5, chunks=3)
rnd_test(rs.triangular, 1, 2, 3, size=5, chunks=3)
rnd_test(rs.uniform, size=5, chunks=3)
rnd_test(rs.vonmises, 2, 3, size=5, chunks=3)
rnd_test(rs.wald, 1, 2, size=5, chunks=3)
rnd_test(rs.weibull, 2, size=5, chunks=3)
rnd_test(rs.zipf, 2, size=5, chunks=3)
rnd_test(rs.standard_cauchy, size=5, chunks=3)
rnd_test(rs.standard_exponential, size=5, chunks=3)
rnd_test(rs.standard_gamma, 2, size=5, chunks=3)
rnd_test(rs.standard_normal, size=5, chunks=3)
rnd_test(rs.standard_t, 2, size=5, chunks=3)
@pytest.mark.parametrize("shape", [(2, 3), (2, 3, 4), (2, 3, 4, 2)])
def test_random_shapes(shape):
rs = da.random.RandomState(RandomState=cupy.random.RandomState)
x = rs.poisson(size=shape, chunks=3)
assert type(x._meta) == cupy.core.core.ndarray
assert_eq(x, x) # Check that _meta and computed arrays match types
assert x._meta.shape == (0,) * len(shape)
assert x.shape == shape
@pytest.mark.xfail(
reason="CuPy division by zero on tensordot(), https://github.com/cupy/cupy/pull/2209"
)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
@pytest.mark.parametrize(
"m,n,chunks,error_type",
[
(20, 10, 10, None), # tall-skinny regular blocks
(20, 10, (3, 10), None), # tall-skinny regular fat layers
(20, 10, ((8, 4, 8), 10), None), # tall-skinny irregular fat layers
(40, 10, ((15, 5, 5, 8, 7), 10), None), # tall-skinny non-uniform chunks (why?)
(128, 2, (16, 2), None), # tall-skinny regular thin layers; recursion_depth=1
(
129,
2,
(16, 2),
None,
), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2
(
130,
2,
(16, 2),
None,
), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
(
131,
2,
(16, 2),
None,
), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
(300, 10, (40, 10), None), # tall-skinny regular thin layers; recursion_depth=2
(300, 10, (30, 10), None), # tall-skinny regular thin layers; recursion_depth=3
(300, 10, (20, 10), None), # tall-skinny regular thin layers; recursion_depth=4
(10, 5, 10, None), # single block tall
(5, 10, 10, None), # single block short
(10, 10, 10, None), # single block square
(10, 40, (10, 10), ValueError), # short-fat regular blocks
(10, 40, (10, 15), ValueError), # short-fat irregular blocks
(
10,
40,
(10, (15, 5, 5, 8, 7)),
ValueError,
), # short-fat non-uniform chunks (why?)
(20, 20, 10, ValueError), # 2x2 regular blocks
],
)
def test_tsqr(m, n, chunks, error_type):
mat = cupy.random.rand(m, n)
data = da.from_array(mat, chunks=chunks, name="A", asarray=False)
# qr
m_q = m
n_q = min(m, n)
m_r = n_q
n_r = n
# svd
m_u = m
n_u = min(m, n)
n_s = n_q
m_vh = n_q
n_vh = n
d_vh = max(m_vh, n_vh) # full matrix returned
if error_type is None:
# test QR
q, r = da.linalg.tsqr(data)
assert_eq((m_q, n_q), q.shape) # shape check
assert_eq((m_r, n_r), r.shape) # shape check
assert_eq(mat, da.dot(q, r)) # accuracy check
assert_eq(cupy.eye(n_q, n_q), da.dot(q.T, q)) # q must be orthonormal
assert_eq(r, np.triu(r.rechunk(r.shape[0]))) # r must be upper triangular
# test SVD
u, s, vh = da.linalg.tsqr(data, compute_svd=True)
s_exact = np.linalg.svd(mat)[1]
assert_eq(s, s_exact) # s must contain the singular values
assert_eq((m_u, n_u), u.shape) # shape check
assert_eq((n_s,), s.shape) # shape check
assert_eq((d_vh, d_vh), vh.shape) # shape check
assert_eq(np.eye(n_u, n_u), da.dot(u.T, u)) # u must be orthonormal
assert_eq(np.eye(d_vh, d_vh), da.dot(vh, vh.T)) # vh must be orthonormal
assert_eq(mat, da.dot(da.dot(u, da.diag(s)), vh[:n_q])) # accuracy check
else:
with pytest.raises(error_type):
q, r = da.linalg.tsqr(data)
with pytest.raises(error_type):
u, s, vh = da.linalg.tsqr(data, compute_svd=True)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
@pytest.mark.parametrize(
"m_min,n_max,chunks,vary_rows,vary_cols,error_type",
[
(10, 5, (10, 5), True, False, None), # single block tall
(10, 5, (10, 5), False, True, None), # single block tall
(10, 5, (10, 5), True, True, None), # single block tall
(40, 5, (10, 5), True, False, None), # multiple blocks tall
(40, 5, (10, 5), False, True, None), # multiple blocks tall
(40, 5, (10, 5), True, True, None), # multiple blocks tall
(
300,
10,
(40, 10),
True,
False,
None,
), # tall-skinny regular thin layers; recursion_depth=2
(
300,
10,
(30, 10),
True,
False,
None,
), # tall-skinny regular thin layers; recursion_depth=3
(
300,
10,
(20, 10),
True,
False,
None,
), # tall-skinny regular thin layers; recursion_depth=4
(
300,
10,
(40, 10),
False,
True,
None,
), # tall-skinny regular thin layers; recursion_depth=2
(
300,
10,
(30, 10),
False,
True,
None,
), # tall-skinny regular thin layers; recursion_depth=3
(
300,
10,
(20, 10),
False,
True,
None,
), # tall-skinny regular thin layers; recursion_depth=4
(
300,
10,
(40, 10),
True,
True,
None,
), # tall-skinny regular thin layers; recursion_depth=2
(
300,
10,
(30, 10),
True,
True,
None,
), # tall-skinny regular thin layers; recursion_depth=3
(
300,
10,
(20, 10),
True,
True,
None,
), # tall-skinny regular thin layers; recursion_depth=4
],
)
def test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):
mat = cupy.random.rand(m_min * 2, n_max)
m, n = m_min * 2, n_max
mat[0:m_min, 0] += 1
_c0 = mat[:, 0]
_r0 = mat[0, :]
c0 = da.from_array(_c0, chunks=m_min, name="c", asarray=False)
r0 = da.from_array(_r0, chunks=n_max, name="r", asarray=False)
data = da.from_array(mat, chunks=chunks, name="A", asarray=False)
if vary_rows:
data = data[c0 > 0.5, :]
mat = mat[_c0 > 0.5, :]
m = mat.shape[0]
if vary_cols:
data = data[:, r0 > 0.5]
mat = mat[:, _r0 > 0.5]
n = mat.shape[1]
# qr
m_q = m
n_q = min(m, n)
m_r = n_q
n_r = n
# svd
m_u = m
n_u = min(m, n)
n_s = n_q
m_vh = n_q
n_vh = n
d_vh = max(m_vh, n_vh) # full matrix returned
if error_type is None:
# test QR
q, r = da.linalg.tsqr(data)
q = q.compute() # because uncertainty
r = r.compute()
assert_eq((m_q, n_q), q.shape) # shape check
assert_eq((m_r, n_r), r.shape) # shape check
assert_eq(mat, np.dot(q, r)) # accuracy check
assert_eq(np.eye(n_q, n_q), np.dot(q.T, q)) # q must be orthonormal
assert_eq(r, np.triu(r)) # r must be upper triangular
# test SVD
u, s, vh = da.linalg.tsqr(data, compute_svd=True)
u = u.compute() # because uncertainty
s = s.compute()
vh = vh.compute()
s_exact = np.linalg.svd(mat)[1]
assert_eq(s, s_exact) # s must contain the singular values
assert_eq((m_u, n_u), u.shape) # shape check
assert_eq((n_s,), s.shape) # shape check
assert_eq((d_vh, d_vh), vh.shape) # shape check
assert_eq(np.eye(n_u, n_u), np.dot(u.T, u)) # u must be orthonormal
assert_eq(np.eye(d_vh, d_vh), np.dot(vh, vh.T)) # vh must be orthonormal
assert_eq(mat, np.dot(np.dot(u, np.diag(s)), vh[:n_q])) # accuracy check
else:
with pytest.raises(error_type):
q, r = da.linalg.tsqr(data)
with pytest.raises(error_type):
u, s, vh = da.linalg.tsqr(data, compute_svd=True)
@pytest.mark.parametrize(
"m,n,chunks,error_type",
[
(20, 10, 10, ValueError), # tall-skinny regular blocks
(20, 10, (3, 10), ValueError), # tall-skinny regular fat layers
(20, 10, ((8, 4, 8), 10), ValueError), # tall-skinny irregular fat layers
(
40,
10,
((15, 5, 5, 8, 7), 10),
ValueError,
), # tall-skinny non-uniform chunks (why?)
(
128,
2,
(16, 2),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=1
(
129,
2,
(16, 2),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=2 --> 17x2
(
130,
2,
(16, 2),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
(
131,
2,
(16, 2),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
(
300,
10,
(40, 10),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=2
(
300,
10,
(30, 10),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=3
(
300,
10,
(20, 10),
ValueError,
), # tall-skinny regular thin layers; recursion_depth=4
(10, 5, 10, None), # single block tall
(5, 10, 10, None), # single block short
(10, 10, 10, None), # single block square
(10, 40, (10, 10), None), # short-fat regular blocks
(10, 40, (10, 15), None), # short-fat irregular blocks
(10, 40, (10, (15, 5, 5, 8, 7)), None), # short-fat non-uniform chunks (why?)
(20, 20, 10, ValueError), # 2x2 regular blocks
],
)
def test_sfqr(m, n, chunks, error_type):
mat = np.random.rand(m, n)
data = da.from_array(mat, chunks=chunks, name="A")
m_q = m
n_q = min(m, n)
m_r = n_q
n_r = n
m_qtq = n_q
if error_type is None:
q, r = da.linalg.sfqr(data)
assert_eq((m_q, n_q), q.shape) # shape check
assert_eq((m_r, n_r), r.shape) # shape check
assert_eq(mat, da.dot(q, r)) # accuracy check
assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q)) # q must be orthonormal
assert_eq(r, da.triu(r.rechunk(r.shape[0]))) # r must be upper triangular
else:
with pytest.raises(error_type):
q, r = da.linalg.sfqr(data)
def test_sparse_hstack_vstack_csr():
pytest.importorskip("cupyx")
x = cupy.arange(24, dtype=cupy.float32).reshape(4, 6)
sp = da.from_array(x, chunks=(2, 3), asarray=False, fancy=False)
sp = sp.map_blocks(cupy.sparse.csr_matrix, dtype=cupy.float32)
y = sp.compute()
assert cupy.sparse.isspmatrix(y)
assert_eq(x, y.todense())
@pytest.mark.xfail(reason="no shape argument support *_like functions on CuPy yet")
@pytest.mark.skipif(
np.__version__ < "1.17", reason="no shape argument for *_like functions"
)
@pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NEP-18 support is not available in NumPy"
)
def test_bincount():
x = cupy.array([2, 1, 5, 2, 1])
d = da.from_array(x, chunks=2, asarray=False)
e = da.bincount(d, minlength=6)
assert_eq(e, np.bincount(x, minlength=6))
assert same_keys(da.bincount(d, minlength=6), e)
assert da.bincount(d, minlength=6).name != da.bincount(d, minlength=7).name
assert da.bincount(d, minlength=6).name == da.bincount(d, minlength=6).name
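# Illustrative sketch (not part of the original suite; assumes the cupy/dask
# imports used by the tests above): the pattern these tests rely on is wrapping
# a CuPy array with da.from_array(..., asarray=False) so the dask array keeps a
# cupy-typed _meta and computed results stay on the GPU, e.g.:
#
#     x = cupy.arange(10)
#     d = da.from_array(x, chunks=5, asarray=False)
#     assert isinstance(d._meta, cupy.ndarray)
#     assert isinstance((d + 1).compute(), cupy.ndarray)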
|
|
#!/usr/bin/env python
"""Tests for Yara flows."""
import contextlib
import functools
import inspect
import os
import platform
import string
from typing import Iterable
from typing import Optional
import unittest
from unittest import mock
from absl import app
import psutil
import yara
from grr_response_client import client_utils
from grr_response_client import process_error
from grr_response_client.client_actions import memory as memory_actions
from grr_response_client.client_actions import tempfiles
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import memory as rdf_memory
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server import flow_responses
from grr_response_server.databases import db
from grr_response_server.flows.general import memory
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr.test_lib import action_mocks
from grr.test_lib import client_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import testing_startup
ONE_MIB = 1024 * 1024
_TEST_YARA_SIGNATURE = """
rule test_rule {
meta:
desc = "Just for testing."
strings:
$s1 = { 31 32 33 34 }
condition:
$s1
}
"""
class FakeMatch(object):
strings = [(100, "$s1", b"1234"), (200, "$s1", b"1234")]
def __init__(self, rule_name="test_rule"):
self.rule = rule_name
class FakeRules(object):
invocations = []
rules = ["test_rule"]
def __getitem__(self, item):
return self.rules[item]
def match(self, data=None, timeout=None): # pylint:disable=invalid-name
self.invocations.append((data, timeout))
return []
class TimeoutRules(FakeRules):
def match(self, data=None, timeout=None): # pylint:disable=invalid-name
del data, timeout
raise yara.TimeoutError("Timed out.")
class TooManyHitsRules(FakeRules):
def match(self, data=None, timeout=None): # pylint:disable=invalid-name
self.invocations.append((data, timeout))
if len(self.invocations) >= 3:
raise yara.Error("internal error: 30")
return [FakeMatch("test_rule_%d" % len(self.invocations))]
def GeneratePattern(seed, length):
if not b"A" <= seed <= b"Z":
raise ValueError("Needs an upper case letter as seed")
ascii_uppercase = b"".join(_.encode("ascii") for _ in string.ascii_uppercase)
res = ascii_uppercase[ascii_uppercase.find(seed):]
while len(res) < length:
res += ascii_uppercase
return res[:length]
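# For illustration (not part of the original tests): GeneratePattern(b"C", 5)
# returns b"CDEFG"; for lengths that run past b"Z" the pattern wraps around to
# b"A" again.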
class FakeRegion(object):
def __init__(self,
start=0,
data=b"",
is_executable=False,
is_writable=False,
is_readable=True):
self.start = start
self.data = data
self.is_executable = is_executable
self.is_writable = is_writable
self.is_readable = is_readable
@property
def size(self):
return len(self.data)
@property
def end(self):
return self.start + self.size
class FakeMemoryProcess(object):
regions_by_pid = {
101: [],
102: [FakeRegion(0, b"A" * 98 + b"1234" + b"B" * 50)],
103: [FakeRegion(0, b"A" * 100),
FakeRegion(10000, b"B" * 500)],
104: [
FakeRegion(0, b"A" * 100),
FakeRegion(1000, b"X" * 50 + b"1234" + b"X" * 50)
],
105: [
FakeRegion(0, GeneratePattern(b"A", 100)),
FakeRegion(300, GeneratePattern(b"B", 700))
],
106: [],
107: [
FakeRegion(0, b"A" * 98 + b"1234" + b"B" * 50),
FakeRegion(400, b"C" * 50 + b"1234")
],
108: [
FakeRegion(0, b"A" * 100, is_executable=True, is_writable=True),
FakeRegion(1000, b"X" * 50 + b"1234" + b"X" * 50)
],
109: [
FakeRegion(0, b"A" * 100),
FakeRegion(100, b"B"),
FakeRegion(101, b"X" * 50 + b"1234" + b"X" * 50)
],
110: [
FakeRegion(0, b"A" * 100),
FakeRegion(1000, b"X" * ONE_MIB + b"1234" + b"X" * ONE_MIB),
FakeRegion(3000000, b"A" * 100),
],
}
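  # Region layout summary: pids 101 and 106 raise an access-denied error in
  # __enter__ below, pids 103 and 105 contain no b"1234" (scan misses), and the
  # remaining pids embed b"1234" somewhere in their regions (scan matches);
  # pid 107 contains it twice, which the per-process result limit test uses.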
def __init__(self, pid=None, tmp_dir=None):
self.pid = pid
self.regions = self.regions_by_pid[pid]
self._tmp_dir = tmp_dir
self._file_descriptor = None
def __enter__(self):
if self.pid in [101, 106]:
raise process_error.ProcessError("Access Denied.")
return self
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
if self._file_descriptor is not None:
os.close(self._file_descriptor)
def ReadBytes(self, address, num_bytes):
for region in self.regions:
if address >= region.start and address + num_bytes <= region.end:
offset = address - region.start
return region.data[offset:offset + num_bytes]
def Regions(self,
skip_mapped_files=False,
skip_shared_regions=False,
skip_executable_regions=False,
skip_readonly_regions=False):
del skip_mapped_files
del skip_shared_regions
del skip_executable_regions
del skip_readonly_regions
for region in self.regions:
yield rdf_memory.ProcessMemoryRegion(
start=region.start,
size=region.size,
is_executable=region.is_executable,
is_writable=region.is_writable,
is_readable=region.is_readable)
@property
def serialized_file_descriptor(self):
if self._file_descriptor is not None:
return self._file_descriptor
memory_path = self._WriteMemoryToFile()
self._file_descriptor = os.open(memory_path, os.O_RDONLY)
return self._file_descriptor
def _WriteMemoryToFile(self) -> str:
memory_path = os.path.join(self._tmp_dir, str(self.pid))
prev_region = None
with open(memory_path, "wb") as f:
for region in self.regions:
if prev_region is None:
prev_end = 0
else:
prev_end = prev_region.end
assert region.start >= prev_end
f.write(b"\x00" * (region.start - prev_end))
f.write(region.data)
prev_region = region
return memory_path
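  # Note: gaps between regions are zero-filled in _WriteMemoryToFile, so file
  # offsets line up with the region start addresses reported by Regions().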
class BaseYaraFlowsTest(flow_test_lib.FlowTestsBaseclass):
"""Tests the Yara flows."""
NO_MATCH_PIDS = (101, 103, 105, 106)
MATCH_PID_1_REGION = 102
MATCH_PID_2_REGIONS = 108
MATCH_BIG_REGIONS = 110
def process(self, processes, pid=None):
for stack_frame in inspect.stack():
# grr_response_client/unprivileged/communication.py needs a real process.
if ("unprivileged" in stack_frame.filename and
"communication.py" in stack_frame.filename):
return psutil.Process.old_target(pid=pid) # pytype: disable=attribute-error
if not pid:
return psutil.Process.old_target() # pytype: disable=attribute-error
for p in processes:
if p.pid == pid:
return p
raise psutil.NoSuchProcess("No process with pid %d." % pid)
def _RunYaraProcessScan(self,
procs,
action_mock=None,
ignore_grr_process=False,
include_errors_in_results="NO_ERRORS",
include_misses_in_results=False,
max_results_per_process=0,
**kw):
if action_mock is None:
client_mock = action_mocks.ActionMock(memory_actions.YaraProcessScan)
else:
client_mock = action_mock
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(client_utils, "OpenProcessForMemoryAccess",
lambda pid: FakeMemoryProcess(pid=pid, tmp_dir=self._tmp_dir))):
session_id = flow_test_lib.TestFlowHelper(
memory.YaraProcessScan.__name__,
client_mock,
yara_signature=_TEST_YARA_SIGNATURE,
client_id=self.client_id,
ignore_grr_process=ignore_grr_process,
include_errors_in_results=include_errors_in_results,
include_misses_in_results=include_misses_in_results,
max_results_per_process=max_results_per_process,
creator=self.test_username,
**kw)
res = flow_test_lib.GetFlowResults(self.client_id, session_id)
matches = [r for r in res if isinstance(r, rdf_memory.YaraProcessScanMatch)]
errors = [r for r in res if isinstance(r, rdf_memory.ProcessMemoryError)]
misses = [r for r in res if isinstance(r, rdf_memory.YaraProcessScanMiss)]
return matches, errors, misses
def setUp(self):
super().setUp()
stack = contextlib.ExitStack()
self.addCleanup(stack.close)
self._tmp_dir = stack.enter_context(utils.TempDirectory())
self.client_id = self.SetupClient(0)
self.procs = [
client_test_lib.MockWindowsProcess(pid=101, name="proc101.exe"),
client_test_lib.MockWindowsProcess(
pid=102, name="proc102.exe", ppid=101),
client_test_lib.MockWindowsProcess(pid=103, name="proc103.exe", ppid=1),
client_test_lib.MockWindowsProcess(
pid=104, name="proc104.exe", ppid=103),
client_test_lib.MockWindowsProcess(pid=105, name="proc105.exe", ppid=1),
client_test_lib.MockWindowsProcess(
pid=106, name="proc106.exe", ppid=104),
client_test_lib.MockWindowsProcess(pid=108, name="proc108.exe"),
client_test_lib.MockWindowsProcess(pid=109, name="proc109.exe"),
client_test_lib.MockWindowsProcess(pid=110, name="proc110.exe"),
]
class YaraFlowsTest(BaseYaraFlowsTest):
"""Tests the Yara flows."""
def testIncludePrivilegedErrors(self):
procs = [p for p in self.procs if p.pid in [101, 106]]
matches, errors, misses = self._RunYaraProcessScan(
procs,
include_misses_in_results=True,
include_errors_in_results="ALL_ERRORS")
self.assertLen(matches, 0)
self.assertLen(errors, 2)
self.assertLen(misses, 0)
def testIgnorePrivilegedErrors(self):
procs = [p for p in self.procs if p.pid in [101, 106]]
matches, errors, misses = self._RunYaraProcessScan(
procs,
include_misses_in_results=True,
include_errors_in_results="CRITICAL_ERRORS")
self.assertLen(matches, 0)
self.assertLen(errors, 0)
self.assertLen(misses, 0)
def testYaraProcessScanWithMissesAndErrors(self):
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(123456789)):
matches, errors, misses = self._RunYaraProcessScan(
procs,
include_misses_in_results=True,
include_errors_in_results="ALL_ERRORS")
self.assertLen(matches, 2)
self.assertLen(errors, 2)
self.assertLen(misses, 2)
for scan_match in matches:
for match in scan_match.match:
self.assertEqual(match.rule_name, "test_rule")
self.assertLen(match.string_matches, 1)
for string_match in match.string_matches:
self.assertEqual(string_match.data, b"1234")
self.assertEqual(string_match.string_id, "$s1")
self.assertIn(string_match.offset, [98, 1050])
@mock.patch.object(memory, "_YARA_SIGNATURE_SHARD_SIZE", 1 << 30)
def testYaraProcessScan_SingleSignatureShard(self):
action_mock = action_mocks.ActionMock(memory_actions.YaraProcessScan)
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
scan_params = {
"include_misses_in_results": True,
"include_errors_in_results": "ALL_ERRORS",
"max_results_per_process": 0,
"ignore_grr_process": False,
}
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(123456789)):
matches, errors, misses = self._RunYaraProcessScan(
procs, action_mock=action_mock, **scan_params)
# Verify scan results.
self.assertLen(matches, 2)
self.assertLen(errors, 2)
self.assertLen(misses, 2)
self.assertEqual(matches[0].match[0].rule_name, "test_rule")
self.assertEqual(matches[0].match[0].string_matches[0].data, b"1234")
flow = data_store.REL_DB.ReadAllFlowObjects(
self.client_id, include_child_flows=False)[0]
# We expect to have sent 1 YaraProcessScanRequest to the client.
self.assertEqual(flow.next_outbound_id, 2)
self.assertEqual(action_mock.recorded_messages[0].session_id.Basename(),
flow.flow_id)
scan_requests = action_mock.recorded_args["YaraProcessScan"]
expected_request = rdf_memory.YaraProcessScanRequest(
signature_shard=rdf_memory.YaraSignatureShard(
index=0, payload=_TEST_YARA_SIGNATURE.encode("utf-8")),
num_signature_shards=1,
**scan_params)
self.assertListEqual(scan_requests, [expected_request])
@mock.patch.object(memory, "_YARA_SIGNATURE_SHARD_SIZE", 30)
def testYaraProcessScan_MultipleSignatureShards(self):
action_mock = action_mocks.ActionMock(memory_actions.YaraProcessScan)
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
scan_params = {
"include_misses_in_results": True,
"include_errors_in_results": "ALL_ERRORS",
"max_results_per_process": 0,
"ignore_grr_process": False,
}
with test_lib.FakeTime(
rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(123456789)):
matches, errors, misses = self._RunYaraProcessScan(
procs, action_mock=action_mock, **scan_params)
# Verify scan results.
self.assertLen(matches, 2)
self.assertLen(errors, 2)
self.assertLen(misses, 2)
self.assertEqual(matches[0].match[0].rule_name, "test_rule")
self.assertEqual(matches[0].match[0].string_matches[0].data, b"1234")
flow = data_store.REL_DB.ReadAllFlowObjects(
self.client_id, include_child_flows=False)[0]
# We expect to have sent 4 YaraProcessScanRequests to the client.
self.assertEqual(flow.next_outbound_id, 5)
scan_requests = action_mock.recorded_args["YaraProcessScan"]
signature_bytes = _TEST_YARA_SIGNATURE.encode("utf-8")
expected_requests = [
rdf_memory.YaraProcessScanRequest(
signature_shard=rdf_memory.YaraSignatureShard(
index=0, payload=signature_bytes[0:30]),
num_signature_shards=4,
**scan_params),
rdf_memory.YaraProcessScanRequest(
signature_shard=rdf_memory.YaraSignatureShard(
index=1, payload=signature_bytes[30:60]),
num_signature_shards=4,
**scan_params),
rdf_memory.YaraProcessScanRequest(
signature_shard=rdf_memory.YaraSignatureShard(
index=2, payload=signature_bytes[60:90]),
num_signature_shards=4,
**scan_params),
rdf_memory.YaraProcessScanRequest(
signature_shard=rdf_memory.YaraSignatureShard(
index=3, payload=signature_bytes[90:]),
num_signature_shards=4,
**scan_params),
]
self.assertCountEqual(scan_requests, expected_requests)
def testYaraProcessScanWithoutMissesAndErrors(self):
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
matches, errors, misses = self._RunYaraProcessScan(procs)
self.assertLen(matches, 2)
self.assertEmpty(errors)
self.assertEmpty(misses)
def testYaraProcessScanWithMissesWithoutErrors(self):
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
matches, errors, misses = self._RunYaraProcessScan(
procs, include_misses_in_results=True)
self.assertLen(matches, 2)
self.assertEmpty(errors)
self.assertLen(misses, 2)
def testYaraProcessScanWithoutMissesWithErrors(self):
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
matches, errors, misses = self._RunYaraProcessScan(
procs, include_errors_in_results="ALL_ERRORS")
self.assertLen(matches, 2)
self.assertLen(errors, 2)
self.assertEmpty(misses)
def testYaraProcessScanLimitMatches(self):
proc = client_test_lib.MockWindowsProcess(pid=107, name="proc107.exe")
matches, _, _ = self._RunYaraProcessScan([proc])
self.assertLen(matches[0].match, 2)
matches, _, _ = self._RunYaraProcessScan([proc], max_results_per_process=1)
self.assertLen(matches[0].match, 1)
def testScanTimingInformation(self):
with test_lib.FakeTime(10000, increment=1):
_, _, misses = self._RunYaraProcessScan(
self.procs, pids=[105], include_misses_in_results=True)
self.assertLen(misses, 1)
miss = misses[0]
self.assertEqual(miss.scan_time_us, 6 * 1e6)
with test_lib.FakeTime(10000, increment=1):
matches, _, _ = self._RunYaraProcessScan(self.procs, pids=[102])
self.assertLen(matches, 1)
match = matches[0]
self.assertEqual(match.scan_time_us, 4 * 1e6)
def testScanResponseChunking(self):
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
with mock.patch.object(
memory_actions.YaraProcessScan, "_RESULTS_PER_RESPONSE", new=2):
with test_lib.Instrument(memory_actions.YaraProcessScan,
"SendReply") as sr:
matches, errors, misses = self._RunYaraProcessScan(
procs,
include_misses_in_results=True,
include_errors_in_results="ALL_ERRORS")
# 6 results, 2 results per message -> 3 messages. The fourth message is
# the status.
self.assertEqual(sr.call_count, 4)
self.assertLen(matches, 2)
self.assertLen(errors, 2)
self.assertLen(misses, 2)
def testPIDsRestriction(self):
matches, errors, misses = self._RunYaraProcessScan(
self.procs,
pids=[101, 104, 105],
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True)
self.assertLen(matches, 1)
self.assertLen(errors, 1)
self.assertLen(misses, 1)
def testProcessRegex(self):
matches, errors, misses = self._RunYaraProcessScan(
self.procs,
process_regex="10(3|6)",
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True)
self.assertEmpty(matches)
self.assertLen(errors, 1)
self.assertLen(misses, 1)
def testPerProcessTimeoutArg(self):
FakeRules.invocations = []
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
with utils.Stubber(yara, "compile", lambda source: FakeRules()):
self._RunYaraProcessScan(procs, per_process_timeout=50)
self.assertLen(FakeRules.invocations, 7)
for invocation in FakeRules.invocations:
_, limit = invocation
self.assertGreater(limit, 45)
self.assertLessEqual(limit, 50)
def testPerProcessTimeout(self):
FakeRules.invocations = []
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
with utils.Stubber(yara, "compile", lambda source: TimeoutRules()):
matches, errors, misses = self._RunYaraProcessScan(
procs,
per_process_timeout=50,
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True)
self.assertEmpty(matches)
self.assertLen(errors, 6)
self.assertEmpty(misses)
for e in errors:
if e.process.pid in [101, 106]:
self.assertEqual("Access Denied.", e.error)
else:
self.assertIn("Scanning timed out", e.error)
def testTooManyHitsError(self):
FakeRules.invocations = []
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
with utils.Stubber(yara, "compile", lambda source: TooManyHitsRules()):
matches, errors, misses = self._RunYaraProcessScan(
procs,
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True)
    # The third invocation raises a too-many-hits error; make sure we still
    # get the first two matches.
self.assertLen(matches, 2)
self.assertCountEqual([m.match[0].rule_name for m in matches],
["test_rule_1", "test_rule_2"])
self.assertLen(errors, 2)
self.assertLen(misses, 2)
def testYaraProcessScanChunkingWorks(self):
FakeRules.invocations = []
procs = [
p for p in self.procs if p.pid in [101, 102, 103, 104, 105, 106, 107]
]
with utils.Stubber(yara, "compile", lambda source: FakeRules()):
self._RunYaraProcessScan(procs, chunk_size=100, overlap_size=10)
self.assertLen(FakeRules.invocations, 21)
for data, _ in FakeRules.invocations:
self.assertLessEqual(len(data), 100)
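    # Rough accounting for the 21 invocations asserted above (assuming the scan
    # steps by chunk_size - overlap_size = 90 bytes per region): pid 102 -> 2
    # chunks, pid 103 -> 1 + 6, pid 104 -> 1 + 2, pid 105 -> 1 + 8; pids 101
    # and 106 contribute nothing (access denied / empty region lists).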
def testMatchSpanningChunks(self):
# Process 102 has a hit spanning bytes 98-102, let's set the chunk
# size around that.
for chunk_size in range(97, 104):
matches, errors, misses = self._RunYaraProcessScan(
self.procs,
chunk_size=chunk_size,
overlap_size=10,
pids=[102],
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True)
self.assertLen(matches, 1)
self.assertEmpty(misses)
self.assertEmpty(errors)
def testDoubleMatchesAreAvoided(self):
# Process 102 has a hit going from 98-102. If we set the chunk
# size a bit larger than that, the hit will be scanned twice. We
# still expect a single match only.
matches, _, _ = self._RunYaraProcessScan(
self.procs, chunk_size=105, overlap_size=10, pids=[102])
self.assertLen(matches, 1)
self.assertLen(matches[0].match, 1)
def _RunProcessDump(self, pids=None, size_limit=None, chunk_size=None):
procs = self.procs
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(client_utils, "OpenProcessForMemoryAccess",
lambda pid: FakeMemoryProcess(pid=pid, tmp_dir=self._tmp_dir))):
client_mock = action_mocks.MultiGetFileClientMock(
memory_actions.YaraProcessDump, tempfiles.DeleteGRRTempFiles)
session_id = flow_test_lib.TestFlowHelper(
memory.DumpProcessMemory.__name__,
client_mock,
pids=pids or [105],
size_limit=size_limit,
chunk_size=chunk_size,
client_id=self.client_id,
ignore_grr_process=True,
creator=self.test_username)
return flow_test_lib.GetFlowResults(self.client_id, session_id)
def _ReadFromPathspec(self, pathspec, num_bytes):
fd = file_store.OpenFile(
db.ClientPath.FromPathSpec(self.client_id, pathspec))
return fd.read(num_bytes)
def testProcessDump(self):
results = self._RunProcessDump()
self.assertLen(results, 3)
for result in results:
if isinstance(result, rdf_client_fs.StatEntry):
self.assertIn("proc105.exe_105", result.pathspec.path)
data = self._ReadFromPathspec(result.pathspec, 1000)
self.assertIn(data,
[GeneratePattern(b"A", 100),
GeneratePattern(b"B", 700)])
elif isinstance(result, rdf_memory.YaraProcessDumpResponse):
self.assertLen(result.dumped_processes, 1)
self.assertEqual(result.dumped_processes[0].process.pid, 105)
else:
self.fail("Unexpected result type %s" % type(result))
def testProcessDumpChunked(self):
with test_lib.Instrument(FakeMemoryProcess, "ReadBytes") as read_func:
results = self._RunProcessDump(chunk_size=11)
# Check that the chunked reads actually happened. Should be 74 reads:
# 100 / 11 + 700 / 11 = 9.1 + 63.6 -> 10 + 64 reads
self.assertLen(read_func.args, 74)
self.assertLen(results, 3)
for result in results:
if isinstance(result, rdf_client_fs.StatEntry):
self.assertIn("proc105.exe_105", result.pathspec.path)
data = self._ReadFromPathspec(result.pathspec, 1000)
self.assertIn(data,
[GeneratePattern(b"A", 100),
GeneratePattern(b"B", 700)])
elif isinstance(result, rdf_memory.YaraProcessDumpResponse):
self.assertLen(result.dumped_processes, 1)
self.assertEqual(result.dumped_processes[0].process.pid, 105)
else:
self.fail("Unexpected result type %s" % type(result))
def testProcessDumpWithLimit(self):
results = self._RunProcessDump(size_limit=100)
    # Now we should only get one block (+ the YaraProcessDumpResponse); the
    # second is over the limit.
self.assertLen(results, 2)
for result in results:
if isinstance(result, rdf_client_fs.StatEntry):
self.assertIn("proc105.exe_105", result.pathspec.path)
data = self._ReadFromPathspec(result.pathspec, 1000)
self.assertEqual(data, GeneratePattern(b"A", 100))
elif isinstance(result, rdf_memory.YaraProcessDumpResponse):
self.assertLen(result.dumped_processes, 1)
self.assertEqual(result.dumped_processes[0].process.pid, 105)
self.assertIn("limit exceeded", result.dumped_processes[0].error)
else:
self.fail("Unexpected result type %s" % type(result))
def testProcessDumpPartiallyDumpsMemory(self):
results = self._RunProcessDump(size_limit=20)
self.assertLen(results, 2)
process = results[0].dumped_processes[0]
self.assertLen(process.memory_regions, 1)
self.assertEqual(process.memory_regions[0].size, 100)
self.assertEqual(process.memory_regions[0].dumped_size, 20)
self.assertEqual(results[1].st_size, 20)
def testProcessDumpByDefaultErrors(self):
# This tests that not specifying any restrictions on the processes
# to dump does not dump them all which would return tons of data.
client_mock = action_mocks.MultiGetFileClientMock(
memory_actions.YaraProcessDump, tempfiles.DeleteGRRTempFiles)
flow_id = flow_test_lib.TestFlowHelper(
memory.DumpProcessMemory.__name__,
client_mock,
client_id=self.client_id,
ignore_grr_process=True,
check_flow_errors=False,
creator=self.test_username)
flow_obj = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
self.assertEqual(flow_obj.error_message, "No processes to dump specified.")
def testDumpTimingInformation(self):
with test_lib.FakeTime(100000, 0.1):
results = self._RunProcessDump()
self.assertGreater(len(results), 1)
self.assertIsInstance(results[0], rdf_memory.YaraProcessDumpResponse)
self.assertLen(results[0].dumped_processes, 1)
self.assertGreater(results[0].dumped_processes[0].dump_time_us, 0)
def testSucceedsWhenUnderRuntimeLimit(self):
procs = [p for p in self.procs if p.pid in [102]]
matches, _, _ = self._RunYaraProcessScan(
procs, runtime_limit=rdfvalue.Duration.From(20, rdfvalue.SECONDS))
self.assertLen(matches, 1)
def testPropagatesScanRuntimeLimit(self):
procs = [p for p in self.procs if p.pid in [102]]
runtime_limits = []
def Run(yps, args):
del args # Unused.
runtime_limits.append(yps.message.runtime_limit_us)
with mock.patch.object(memory_actions.YaraProcessScan, "Run", Run):
self._RunYaraProcessScan(
procs,
scan_runtime_limit_us=rdfvalue.Duration.From(5, rdfvalue.SECONDS))
self.assertLen(runtime_limits, 1)
self.assertEqual(runtime_limits[0],
rdfvalue.Duration.From(5, rdfvalue.SECONDS))
def testFailsWithExceededScanRuntimeLimit(self):
procs = [p for p in self.procs if p.pid in [102]]
with self.assertRaisesRegex(RuntimeError, r"Runtime limit exceeded"):
self._RunYaraProcessScan(
procs,
scan_runtime_limit_us=rdfvalue.Duration.From(1,
rdfvalue.MICROSECONDS))
def testScanAndDump(self):
client_mock = action_mocks.MultiGetFileClientMock(
memory_actions.YaraProcessScan, memory_actions.YaraProcessDump,
tempfiles.DeleteGRRTempFiles)
procs = [p for p in self.procs if p.pid in [102, 103]]
with mock.patch.object(file_store.EXTERNAL_FILE_STORE, "AddFiles") as efs:
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(client_utils, "OpenProcessForMemoryAccess",
lambda pid: FakeMemoryProcess(pid=pid, tmp_dir=self._tmp_dir))):
session_id = flow_test_lib.TestFlowHelper(
memory.YaraProcessScan.__name__,
client_mock,
yara_signature=_TEST_YARA_SIGNATURE,
client_id=self.client_id,
creator=self.test_username,
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True,
dump_process_on_match=True)
# Process dumps are not pushed to external file stores.
self.assertEqual(efs.call_count, 0)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
# 1. Scan result match.
# 2. Scan result miss.
# 3. ProcDump response.
# 4. Stat entry for the dumped file.
self.assertLen(results, 4)
self.assertIsInstance(results[0], rdf_memory.YaraProcessScanMatch)
self.assertIsInstance(results[1], rdf_memory.YaraProcessScanMiss)
self.assertIsInstance(results[2], rdf_memory.YaraProcessDumpResponse)
self.assertIsInstance(results[3], rdf_client_fs.StatEntry)
self.assertLen(results[2].dumped_processes, 1)
self.assertEqual(results[0].process.pid,
results[2].dumped_processes[0].process.pid)
self.assertEmpty(results[2].dumped_processes[0].dump_files)
self.assertLen(results[2].dumped_processes[0].memory_regions, 1)
# TODO: Fix PathSpec.__eq__, then compare PathSpecs here.
self.assertEqual(
results[2].dumped_processes[0].memory_regions[0].file.CollapsePath(),
results[3].pathspec.CollapsePath())
def testScanAndDumpPopulatesMemoryRegions(self):
client_mock = action_mocks.MultiGetFileClientMock(
memory_actions.YaraProcessScan, memory_actions.YaraProcessDump,
tempfiles.DeleteGRRTempFiles)
procs = [p for p in self.procs if p.pid in [108]]
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(client_utils, "OpenProcessForMemoryAccess",
lambda pid: FakeMemoryProcess(pid=pid, tmp_dir=self._tmp_dir))):
session_id = flow_test_lib.TestFlowHelper(
memory.YaraProcessScan.__name__,
client_mock,
yara_signature=_TEST_YARA_SIGNATURE,
client_id=self.client_id,
creator=self.test_username,
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True,
dump_process_on_match=True)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
dumps = [
r for r in results if isinstance(r, rdf_memory.YaraProcessDumpResponse)
]
self.assertLen(dumps, 1)
self.assertLen(dumps[0].dumped_processes, 1)
self.assertLen(dumps[0].dumped_processes[0].memory_regions, 2)
regions = dumps[0].dumped_processes[0].memory_regions
self.assertEqual(regions[0].start, 0)
self.assertEqual(regions[0].size, 100)
self.assertEqual(regions[0].dumped_size, 100)
self.assertEqual(regions[0].is_executable, True)
self.assertEqual(regions[0].is_writable, True)
self.assertIsNotNone(regions[0].file)
self.assertEqual(regions[1].start, 1000)
self.assertEqual(regions[1].size, 104)
self.assertEqual(regions[1].dumped_size, 104)
self.assertEqual(regions[1].is_executable, False)
self.assertEqual(regions[1].is_writable, False)
self.assertIsNotNone(regions[1].file)
def testScanAndDumpPrioritizesRegionsWithMatch(self):
client_mock = action_mocks.MultiGetFileClientMock(
memory_actions.YaraProcessScan, memory_actions.YaraProcessDump,
tempfiles.DeleteGRRTempFiles)
procs = [p for p in self.procs if p.pid in [109]]
with utils.MultiStubber(
(psutil, "process_iter", lambda: procs),
(psutil, "Process", functools.partial(self.process, procs)),
(client_utils, "OpenProcessForMemoryAccess",
lambda pid: FakeMemoryProcess(pid=pid, tmp_dir=self._tmp_dir))):
session_id = flow_test_lib.TestFlowHelper(
memory.YaraProcessScan.__name__,
client_mock,
yara_signature=_TEST_YARA_SIGNATURE,
client_id=self.client_id,
creator=self.test_username,
include_errors_in_results="ALL_ERRORS",
include_misses_in_results=True,
dump_process_on_match=True,
process_dump_size_limit=100 + 104) # size of first and third region.
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
dumps = [
r for r in results if isinstance(r, rdf_memory.YaraProcessDumpResponse)
]
self.assertLen(dumps, 1)
self.assertLen(dumps[0].dumped_processes, 1)
self.assertLen(dumps[0].dumped_processes[0].memory_regions, 2)
regions = dumps[0].dumped_processes[0].memory_regions
# Dump should skip the second region, because the first and third fill the
# size limit.
self.assertEqual(regions[0].start, 0)
self.assertEqual(regions[0].dumped_size, 100)
self.assertIsNotNone(regions[0].file)
self.assertEqual(regions[1].start, 101)
self.assertEqual(regions[1].dumped_size, 104)
self.assertIsNotNone(regions[1].file)
def testLegacyDataMigration(self):
res = rdf_memory.YaraProcessDumpResponse(dumped_processes=[
rdf_memory.YaraProcessDumpInformation(dump_files=[
rdf_paths.PathSpec(
path="C:\\Foo\\Bar\\%s_%d_%x_%x.tmp" %
("my_proc", 123, 111, 222),
pathtype="TMPFILE"),
rdf_paths.PathSpec(
path="/foo/bar/%s_%d_%x_%x.tmp" % ("my_proc", 123, 456, 789),
pathtype="TMPFILE")
])
])
memory._MigrateLegacyDumpFilesToMemoryAreas(res)
self.assertEqual(
res,
rdf_memory.YaraProcessDumpResponse(dumped_processes=[
rdf_memory.YaraProcessDumpInformation(memory_regions=[
rdf_memory.ProcessMemoryRegion(
start=111,
size=111,
file=rdf_paths.PathSpec(
path="/C:/Foo/Bar/%s_%d_%x_%x.tmp" %
("my_proc", 123, 111, 222),
pathtype="TMPFILE")),
rdf_memory.ProcessMemoryRegion(
start=456,
size=333,
file=rdf_paths.PathSpec(
path="/foo/bar/%s_%d_%x_%x.tmp" %
("my_proc", 123, 456, 789),
pathtype="TMPFILE"))
])
]))
def testPathSpecCasingIsCorrected(self):
flow = memory.DumpProcessMemory(rdf_flow_objects.Flow())
flow.SendReply = mock.Mock(spec=flow.SendReply)
request = rdf_flow_objects.FlowRequest(
request_data={
"YaraProcessDumpResponse":
rdf_memory.YaraProcessDumpResponse(dumped_processes=[
rdf_memory.YaraProcessDumpInformation(memory_regions=[
rdf_memory.ProcessMemoryRegion(
start=1,
size=1,
file=rdf_paths.PathSpec.Temp(
path="/C:/grr/x_1_0_1.tmp")),
rdf_memory.ProcessMemoryRegion(
start=1,
size=1,
file=rdf_paths.PathSpec.Temp(
path="/C:/GRR/x_1_1_2.tmp"))
])
])
})
pathspecs = [
rdf_paths.PathSpec.Temp(path="/C:/Grr/x_1_0_1.tmp"),
rdf_paths.PathSpec.Temp(path="/C:/Grr/x_1_1_2.tmp")
]
responses = flow_responses.Responses.FromResponses(request, [
rdf_flow_objects.FlowResponse(
payload=rdf_client_fs.StatEntry(pathspec=pathspec))
for pathspec in pathspecs
])
flow.ProcessMemoryRegions(responses)
flow.SendReply.assert_any_call(
rdf_memory.YaraProcessDumpResponse(dumped_processes=[
rdf_memory.YaraProcessDumpInformation(memory_regions=[
rdf_memory.ProcessMemoryRegion(
start=1,
size=1,
file=rdf_paths.PathSpec.Temp(path="/C:/Grr/x_1_0_1.tmp")),
rdf_memory.ProcessMemoryRegion(
start=1,
size=1,
file=rdf_paths.PathSpec.Temp(path="/C:/Grr/x_1_1_2.tmp"))
])
]))
@unittest.skipIf(
platform.system() != "Linux",
"FakeMemoryProcess.serialized_file_descriptor works only on Linux.")
class YaraFlowsUnprivilegedTest(YaraFlowsTest):
def setUp(self):
super().setUp()
stack = contextlib.ExitStack()
self.addCleanup(stack.close)
stack.enter_context(
test_lib.ConfigOverrider({"Client.use_memory_sandboxing": True}))
# Use smaller batch size to exercise the batching logic.
stack.enter_context(
mock.patch.object(memory_actions.BatchedUnprivilegedYaraWrapper,
"BATCH_SIZE", 2))
# Tracking of time works differently in unprivileged mode.
# (There isn't one call to RDFDatetime.Now() per chunk due to batching).
def testScanTimingInformation(self):
with test_lib.FakeTime(10000, increment=1):
_, _, misses = self._RunYaraProcessScan(
self.procs, pids=[105], include_misses_in_results=True)
self.assertLen(misses, 1)
miss = misses[0]
self.assertEqual(miss.scan_time_us, 3 * 1e6)
with test_lib.FakeTime(10000, increment=1):
matches, _, _ = self._RunYaraProcessScan(self.procs, pids=[102])
self.assertLen(matches, 1)
match = matches[0]
self.assertEqual(match.scan_time_us, 3 * 1e6)
# The following tests don't work with sandboxing, because they mock
# yara.compile, which is executed in the unprivileged process.
def testPerProcessTimeout(self):
pass
def testPerProcessTimeoutArg(self):
pass
def testTooManyHitsError(self):
pass
def testYaraProcessScanChunkingWorks(self):
pass
class YaraProcessScanTest(flow_test_lib.FlowTestsBaseclass):
@classmethod
def setUpClass(cls):
super(YaraProcessScanTest, cls).setUpClass()
testing_startup.TestInit()
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testYaraSignatureReferenceDeliversFullSignatureToClient(self):
signature = "rule foo { condition: true };"
blob = signature.encode("utf-8")
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)
data_store.REL_DB.WriteGRRUser(username="foobarski")
data_store.REL_DB.WriteYaraSignatureReference(blob_id, username="foobarski")
args = rdf_memory.YaraProcessScanRequest()
args.yara_signature_blob_id = blob_id.AsBytes()
shards = []
class FakeYaraProcessScan(action_mocks.ActionMock):
def YaraProcessScan(
self,
args: rdf_memory.YaraProcessScanRequest,
) -> Iterable[rdf_memory.YaraProcessScanResponse]:
shards.append(args.signature_shard)
return []
self._YaraProcessScan(args, action_mock=FakeYaraProcessScan())
payloads = [_.payload for _ in sorted(shards, key=lambda _: _.index)]
self.assertEqual(b"".join(payloads).decode("utf-8"), signature)
def testYaraSignatureReferenceIncorrect(self):
data = "This is very c0nfidential and should not leak to the client."
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(data.encode("utf-8"))
args = rdf_memory.YaraProcessScanRequest()
args.yara_signature_blob_id = blob_id.AsBytes()
with self.assertRaisesRegex(RuntimeError, "signature reference"):
self._YaraProcessScan(args)
def testYaraSignatureReferenceNotExisting(self):
args = rdf_memory.YaraProcessScanRequest()
args.yara_signature_blob_id = os.urandom(32)
with self.assertRaisesRegex(RuntimeError, "signature reference"):
self._YaraProcessScan(args)
def testYaraSignatureAndSignatureReference(self):
signature = "rule foo { condition: true };"
blob = signature.encode("utf-8")
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(blob)
data_store.REL_DB.WriteGRRUser(username="foobarski")
data_store.REL_DB.WriteYaraSignatureReference(blob_id, username="foobarski")
args = rdf_memory.YaraProcessScanRequest()
args.yara_signature = signature
args.yara_signature_blob_id = blob_id.AsBytes()
with self.assertRaisesRegex(RuntimeError, "can't be used together"):
self._YaraProcessScan(args)
def _YaraProcessScan(
self,
args: rdf_memory.YaraProcessScanRequest,
action_mock: Optional[action_mocks.ActionMock] = None,
) -> None:
if action_mock is None:
action_mock = action_mocks.ActionMock()
flow_test_lib.TestFlowHelper(
memory.YaraProcessScan.__name__,
action_mock,
client_id=self.client_id,
creator=self.test_username,
args=args)
flow_test_lib.FinishAllFlowsOnClient(self.client_id)
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
|
#!/usr/local/bin/python
#
# Copyright (c) 2009-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from os.path import dirname, abspath, join as path_join
from inspect import getfile, currentframe
currentdir = dirname(abspath(getfile(currentframe())))
sippydir = path_join(currentdir, 'sippy_lite')
sys.path.insert(0, sippydir)
from Rtp_cluster_config import read_cluster_config
from Rtp_cluster import Rtp_cluster
from Rtp_cluster_member import Rtp_cluster_member
import getopt, os
import sys
import signal
from pwd import getpwnam
from grp import getgrnam
from socket import AF_INET, AF_INET6, AF_UNIX
from sippy.SipConf import MyAddress
from sippy.Signal import LogSignal
from sippy.SipLogger import SipLogger
from sippy.misc import daemonize
from sippy.Core.EventDispatcher import ED2
from Rtp_cluster_cli import Rtp_cluster_cli
class fakecli(object):
rtp_clusters = None
def __init__(self):
self.rtp_clusters = []
def usage():
print('usage: rtp_cluster.py [-fd] [-P pidfile] [-c conffile] [-L logfile] [-s cmd_socket]\n' \
' [-o uname:gname]')
sys.exit(1)
def debug_signal(signum, frame):
import sys, traceback
    for thread_id, stack in sys._current_frames().items():
print('Thread id: %s\n%s' % (thread_id, ''.join(traceback.format_stack(stack))))
def reopen(logfile):
    print('SIGUSR1 received, reopening logs')
if logfile == None:
return
fake_stdout = open(logfile, 'a', 1)
sys.stdout = fake_stdout
sys.stderr = fake_stdout
fd = fake_stdout.fileno()
os.dup2(fd, sys.__stdout__.fileno())
os.dup2(fd, sys.__stderr__.fileno())
def terminate():
ED2.breakLoop()
if __name__ == '__main__':
global_config = {}
try:
opts, args = getopt.getopt(sys.argv[1:], 'fP:c:L:s:o:dD')
except getopt.GetoptError:
usage()
sip_logger = SipLogger('rtp_cluster')
sip_logger.write('Starting up...')
foreground = False
dry_run = False
debug_threads = False
pidfile = '/var/run/rtp_cluster.pid'
logfile = '/var/log/rtp_cluster.log'
csockfile = '/var/run/rtp_cluster.sock'
global_config['conffile'] = '/usr/local/etc/rtp_cluster.xml'
global_config['_sip_address'] = MyAddress()
for o, a in opts:
if o == '-f':
foreground = True
continue
if o == '-P':
pidfile = a.strip()
continue
if o == '-c':
global_config['conffile'] = a.strip()
continue
if o == '-L':
logfile = a.strip()
continue
if o == '-s':
csockfile = a.strip()
continue
if o == '-o':
sown_user, sown_gpr = a.split(':', 1)
sown_uid = getpwnam(sown_user).pw_uid
sown_gid = getgrnam(sown_gpr).gr_gid
global_config['_rtpc_sockowner'] = (sown_uid, sown_gid)
continue
if o == '-d':
dry_run = True
foreground = True
continue
if o == '-D':
debug_threads = True
continue
sip_logger.write(' o reading config "%s"...' % \
global_config['conffile'])
global_config['_sip_logger'] = sip_logger
f = open(global_config['conffile'])
config = read_cluster_config(global_config, f.read())
if not foreground:
        # Shut down the logger and reopen it again to make sure its worker
        # thread won't be affected by the fork()
sip_logger.shutdown()
daemonize(logfile = logfile)
open(pidfile, 'w').write(str(os.getpid()) + '\n')
sip_logger = SipLogger('rtp_cluster')
global_config['_sip_logger'] = sip_logger
LogSignal(sip_logger, signal.SIGUSR1, reopen, logfile)
LogSignal(sip_logger, signal.SIGTERM, terminate)
sip_logger.write(' o initializing CLI...')
if not dry_run:
cli = Rtp_cluster_cli(global_config, address = csockfile)
else:
cli = fakecli()
for c in config:
#print 'Rtp_cluster', global_config, c['name'], c['address']
sip_logger.write(' o initializing cluster "%s" at <%s>' % (c['name'], c['address']))
rtp_cluster = Rtp_cluster(global_config, c['name'], c['address'], \
dnconfig = c.get('dnconfig', None), dry_run = dry_run)
rtp_cluster.capacity_limit_soft = c.get('capacity_limit_soft', True)
for rtpp_config in c['rtpproxies']:
sip_logger.write(' - adding RTPproxy member %s at <%s>' % (rtpp_config['name'], rtpp_config['address']))
#Rtp_cluster_member('rtpproxy1', global_config, ('127.0.0.1', 22222))
if rtpp_config['protocol'] not in ('unix', 'udp', 'udp6'):
raise Exception('Unsupported RTPproxy protocol: "%s"' % rtpp_config['protocol'])
if rtpp_config['protocol'] in ('udp', 'udp6'):
address = rtpp_config['address'].rsplit(':', 1)
if len(address) == 1:
address.append(22222)
else:
address[1] = int(address[1])
address = tuple(address)
if rtpp_config['protocol'] == 'udp':
family = AF_INET
else:
family = AF_INET6
else:
address = rtpp_config['address']
family = AF_UNIX
if 'cmd_out_address' in rtpp_config:
bind_address = rtpp_config['cmd_out_address']
else:
bind_address = None
rtpp = Rtp_cluster_member(rtpp_config['name'], global_config, address, bind_address, family = family)
rtpp.weight = rtpp_config['weight']
rtpp.capacity = rtpp_config['capacity']
rtpp.status = rtpp_config['status']
if 'wan_address' in rtpp_config:
rtpp.wan_address = rtpp_config['wan_address']
if 'lan_address' in rtpp_config:
rtpp.lan_address = rtpp_config['lan_address']
rtp_cluster.add_member(rtpp)
cli.rtp_clusters.append(rtp_cluster)
#rtp_cluster = Rtp_cluster(global_config, 'supercluster', dry_run = dry_run)
if dry_run:
sip_logger.write('Configuration check is complete, no errors found')
for rtp_cluster in cli.rtp_clusters:
rtp_cluster.shutdown()
sip_logger.shutdown()
from time import sleep
# Give worker threads some time to cease&desist
sleep(0.1)
sys.exit(0)
if debug_threads:
signal.signal(signal.SIGINFO, debug_signal)
sip_logger.write('Initialization complete, have a good flight.')
ED2.loop()
|
|
#!/usr/bin/python
# Copyright (c) 2008-2011 Mark Eichin <[email protected]>
# See ./LICENSE (MIT style.)
"""take recent twitters and zephyr them to me"""
__version__ = "0.4"
__author__ = "Mark Eichin <[email protected]>"
__license__ = "MIT"
import sys
import os
import getpass
import subprocess
import signal
import tweepy
import time
from lengthener import lengthen
def get_oauth_info(appname=None):
"""get this user's oauth info"""
filebase = os.path.expanduser("~/.ztwit_")
if appname:
# default path is ztwitgw
filebase += appname + "_"
key, secret = file(filebase + "oauth", "r").read().strip().split(":", 1)
return key, secret
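# (The ~/.ztwit_[<appname>_]oauth file is expected to hold a single line of the
# form "consumer_key:consumer_secret".)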
# TODO: write get_verifier_X11
def get_verifier_tty(output_path, appname=None):
"""we don't have a verifier, ask the user to use a browser and get one"""
consumer_token, consumer_secret = get_oauth_info(appname=appname)
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
redirect_url = auth.get_authorization_url() # tweepy.TweepError
print "Open this URL in a browser where you're logged in to twitter:"
print redirect_url
verifier = raw_input("Enter (cut&paste) the response code: ")
# use it...
auth.get_access_token(verifier)
# hmm, discard the verifier?
file(output_path, "wb").write(":".join([auth.request_token.key,
auth.request_token.secret,
auth.access_token.key,
auth.access_token.secret,
verifier]))
def get_just_verifier(output_path, appname=None):
"""ask for the verifier *without* having consumer info"""
auth = tweepy.OAuthHandler("", "")
# TODO: this can't work unless we first give the user a redirect to the
# URL to *get* the response code. and possibly not then?
verifier = raw_input("Enter (cut&paste) the response code: ")
# use it...
auth.get_access_token(verifier)
# hmm, discard the verifier?
file(output_path, "wb").write(":".join([auth.request_token.key,
auth.request_token.secret,
auth.access_token.key,
auth.access_token.secret,
verifier]))
def get_oauth_verifier(fallback_mechanism, appname=None):
"""get the request token and verifier, using fallback_mechanism if we don't have one stashed"""
filebase = os.path.expanduser("~/.ztwit_")
if appname:
# default path is ztwitgw
filebase += appname + "_"
verifier_file = filebase + "oauth_verifier"
if not os.path.exists(verifier_file):
fallback_mechanism(verifier_file, appname=appname)
if not os.path.exists(verifier_file):
raise Exception("Fallback Failed")
rt_key, rt_secret, at_key, at_secret, verifier = file(verifier_file, "r").read().strip().split(":", 4)
return rt_key, rt_secret, at_key, at_secret, verifier
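# (The stashed ~/.ztwit_[<appname>_]oauth_verifier file holds
# "request_key:request_secret:access_key:access_secret:verifier", as written by
# get_verifier_tty above.)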
# do this with a localhost url?
def zwrite(username, body, tag, status_id=None):
"""deliver one twitter message to zephyr"""
# username... will get encoded when we see one
try:
body = body.encode("iso-8859-1", "xmlcharrefreplace")
except UnicodeDecodeError, ude:
body = repr(body) + ("\n[encode fail: %s]" % ude)
body = body.encode("iso-8859-1", "xmlcharrefreplace")
# example syntax: http://twitter.com/engadget/status/18164103530
zurl = " http://twitter.com/%s/status/%s" % (username, status_id) if status_id else ""
zsig = "%s %s%svia ztwitgw%s" % (username, tag, tag and " ", zurl)
    # tag is from code
cmd = ["zwrite",
"-q", # quiet
"-d", # Don't authenticate
"-s", zsig,
"-c", "%s.twitter" % getpass.getuser(),
"-i", username,
"-m", body]
subprocess.check_call(cmd)
def entity_decode(txt):
"""decode simple entities"""
    # TODO: find out which entities twitter considers defined,
# or if sgmllib.entitydefs is enough...
return txt.replace(">", ">").replace("<", "<").replace("&", "&")
# turns out we don't actually see & in practice...
assert entity_decode("-> <3") == "-> <3"
def maybe_lengthen(url):
"""lengthen the url (with an old-ref) *or* leave it untouched"""
new_url = lengthen(url)
if not new_url:
return url
return "%s (via %s )" % (new_url, url)
def slice_substitute(target, offset, low, high, replacement):
"""substitute replacement into target in span low..high; return new target, new offset"""
target = target[:low+offset] + replacement + target[high+offset:]
offset += len(replacement) - (high - low)
return target, offset
assert slice_substitute("abcdefghij", 0, 3, 6, "DEF") == ('abcDEFghij', 0)
assert slice_substitute("abcdefghij", 0, 3, 6, "X") == ('abcXghij', -2)
assert slice_substitute("abcdefghij", 0, 3, 6, "__DEF__") == ('abc__DEF__ghij', 4)
def url_expander(twit, body):
"""expand urls in the body, safely"""
expcount = 0
urlcount = 0
longcount = 0
offset = 0
try:
# https://dev.twitter.com/docs/tweet-entities
# do media later, stick with urls for now
for urlblock in twit.entities.get("urls", []):
low, high = urlblock["indices"]
if urlblock.get("expanded_url"):
body, offset = slice_substitute(body, offset, low, high,
maybe_lengthen(urlblock["expanded_url"]))
expcount += 1
else:
raw_replacement = maybe_lengthen(urlblock["url"])
if raw_replacement != urlblock["url"]:
body, offset = slice_substitute(body, offset, low, high, raw_replacement)
longcount += 1
urlcount += 1
if expcount or urlcount or longcount:
return body + ("\n[expanded %s/%s urls, lengthened %s]" % (expcount, urlcount, longcount))
return body
except Exception, exc:
return body + ("[expander failed: %s]" % exc)
def process_new_twits(api, proto=None, tag=""):
"""process new messages, stashing markers"""
if proto is None:
proto = api.home_timeline
filebase = os.path.expanduser("~/.ztwit_")
if tag:
filebase = filebase + tag + "_"
sincefile = filebase + "since"
since_id = None
if os.path.exists(sincefile):
since_id = file(sincefile, "r").read().strip()
# if since_id: # allow for truncated file
# the iterators *have* a prev, but there's no way to "start" at since_id?
# favorites.json doesn't take an id arg, and it's not like we save anything
# (other than parsing) by walking up and then down, since the json for the
# entire set is loaded anyway...
for twit in reversed(list(tweepy.Cursor(proto, since_id=since_id, include_entities=1).items())):
# reversed?
if not twit:
print "huh? empty twit"
continue
# type(twit) == tweepy.models.Status
# type(twit.author) == tweepy.models.User
who = twit.author.screen_name
what = entity_decode(url_expander(twit, twit.text))
status_id = twit.id_str # to construct a link
zwrite(who, what, tag, status_id)
since_id = status_id
print "Sent:", since_id
time.sleep(3)
signal.alarm(5*60) # if we're actually making progress, push back the timeout
# Note that since_id is just an ordering - if I favorite an old tweet (even
# something that showed up new because it was freshly retweeted) it doesn't
# show up. This isn't a new bug, I'm just noticing it...
newsince = file(sincefile, "w")
print >> newsince, since_id
newsince.close()
def display_rate_limit(tag, limit):
"""Display rate limit if it isn't at maximum; return available count for convenience"""
if limit["remaining"] != limit["limit"]:
print limit["remaining"], "out of", limit["limit"], tag, "queries available"
reset_time = limit["reset"]
print " Will reset in", reset_time - time.time(), "seconds, at", time.ctime(reset_time)
return limit["remaining"]
if __name__ == "__main__":
# This conflicts with the long-lag retry mode, so just turn it off for now
# signal.alarm(5*60) # been seeing some hangs, give up after a bit
prog, = sys.argv
rt_key, rt_secret, at_key, at_secret, verifier = get_oauth_verifier(get_verifier_tty)
consumer_token, consumer_secret = get_oauth_info()
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_request_token(rt_key, rt_secret)
auth.set_access_token(at_key, at_secret)
print "ct:", consumer_token
print "cs:", consumer_secret
print "rk:", rt_key
print "rs:", rt_secret
print "vf:", verifier
print "ak:", at_key
print "as:", at_secret
# request limits reset every 15 minutes, so retry in 16
# retry 10 times to allow us to get 3000 messages behind
# set the timeout to match the retry count
api = tweepy.API(auth, retry_delay=16*60, retry_count=10, timeout=160*60)
limits = api.rate_limit_status()
home_left = display_rate_limit("home", limits["resources"]["statuses"]["/statuses/home_timeline"])
ment_left = display_rate_limit("mentions", limits["resources"]["statuses"]["/statuses/mentions_timeline"])
fave_left = display_rate_limit("favorites", limits["resources"]["favorites"]["/favorites/list"])
if home_left > 0:
process_new_twits(api)
if ment_left > 0:
process_new_twits(api, proto=api.mentions_timeline, tag="reply")
# replies_url = "http://twitter.com/statuses/replies.json"
# but that's not in tweepy... try hacking it?
# hmm, not in http://apiwiki.twitter.com/w/page/22554679/Twitter-API-Documentation either
if fave_left > 0:
process_new_twits(api, proto=api.favorites, tag="favorites")
|
|
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
    # checks that the verbose output is the same for the flag
    # values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(DeprecationWarning, "The DPGMM class is"
" not working correctly and it's better "
"to not use it. DPGMM is deprecated in 0.18 "
"and will be removed in 0.20.", DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
@ignore_warnings(category=DeprecationWarning)
def test_VBGMM_deprecation():
assert_warns_message(DeprecationWarning, "The VBGMM class is"
" not working correctly and it's better"
" to not use it. VBGMM is deprecated in 0.18"
" and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
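# A minimal usage sketch of the (deprecated) DPGMM estimator exercised by the
# tests above, on the same kind of 3-cluster blob data; the parameter values
# are illustrative rather than taken from any particular test:
def _dpgmm_demo():
    X, _ = make_blobs(random_state=1)
    model = DPGMM(n_components=10, alpha=20, n_iter=50, random_state=1)
    model.fit(X)
    # Components with non-negligible weight are the ones actually used.
    return np.unique(model.predict(X)), model.weights_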
|
|
from __future__ import division
import sys
import os
import shutil
import warnings
import inspect
import configparser
import threading as thread
import traceback
import copy
import logging
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('agg') # Prevents crash when GUI runs matplotlib in thread on Linux
import matplotlib.pyplot as plt
from .. import empirical as emp
from .. import models as mod
from .. import compare as comp
from .. import misc
import time
def _better_time(gmtime=None):
    # Placeholder hook for timestamp formatting; the log formatter in main()
    # below builds its timestamp inline with time.strftime, so this returns None.
    return
def main(param_path='parameters.txt'):
"""
Entry point function for analysis based on parameter files.
Parameters
----------
param_path : str
Path to user-generated parameter file
"""
# Confirm parameters file is present
if not os.path.isfile(param_path):
raise IOError, "Parameter file not found at %s" % param_path
# Get raw params and base options (non-run-dependent options)
params, base_options = _get_params_base_options(param_path)
# Configure and start logging
# Done here instead of in function so will affect all subsequent calls
log_path = os.path.join(base_options['results_dir'], '_log.txt')
if os.path.isfile(log_path):
os.remove(log_path)
logging.basicConfig(level=logging.INFO, format='%(message)s')
fileh = logging.FileHandler(log_path)
fileh.setLevel(logging.DEBUG)
filefmt = logging.Formatter(
time.strftime("%Y/%m/%d %H:%M:%S %p", time.localtime()) +
' - %(name)s - %(levelname)s - %(message)s')
fileh.setFormatter(filefmt)
logging.getLogger('').addHandler(fileh)
def log_uncaught(type1, value1, traceback1):
tb_list = traceback.format_exception(type1, value1, traceback1)
tb_str = ''.join(tb_list)
logging.critical('\n\n'+tb_str)
sys.excepthook = log_uncaught
logging.info('Running macroeco') # v%s' % __version__)
logging.info('Parameters file at %s' % os.path.abspath(param_path))
# Preliminary check for errors in parameters file
bad_params = misc.check_parameter_file(param_path)
if len(bad_params[0]) > 0:
logging.warning("Possible formatting error(s) in" +
" %s: parameters %s on lines %s"
% (param_path, bad_params[0], bad_params[1]))
logging.info('Starting analysis')
# Do analysis for each run
for run_name in base_options['run_names']:
logging.info('Starting run %s' % run_name)
options = dict(params[run_name]) # All parameters from this run
options.update(base_options) # Add base parameters
options['run_dir'] = os.path.join(base_options['results_dir'],run_name)
if 'format' in options['analysis']:
_do_format(options)
else:
_do_analysis(options)
logging.info('Finished run %s' % run_name)
logging.info('Finished analysis successfully')
logging.info('Results available at %s' % options['param_dir'])
# Close logging - releases log file lock in Windows GUI
logging.shutdown()
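# A sketch of the kind of parameters file main() consumes, with a single run
# section; the analysis name, metadata path, column mapping, and models below
# are illustrative assumptions, not values taken from this module:
#
# [my_sad_run]
# analysis = sad
# metadata = data/demo_metadata.txt
# cols = spp_col:spp; count_col:count
# models = logser; lognorm
# log_y = True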
def _get_params_base_options(param_path):
# Read parameter file into params object
params = configparser.ConfigParser()
try:
params.read(param_path)
except:
raise ValueError, "Parameter file is invalid"
# Setup param_dir and results_dir, get run_names
param_dir = os.path.abspath(os.path.dirname(param_path))
results_dir = os.path.join(param_dir, 'results')
if os.path.isdir(results_dir):
shutil.rmtree(results_dir)
os.makedirs(results_dir)
run_names = params.sections()
# Check there's at least one run
if not run_names:
raise NameError, "Parameters file must contain at least one run"
# Create options dict
base_options = {}
base_options['param_dir'] = param_dir
base_options['results_dir'] = results_dir
base_options['run_names'] = run_names
return params, base_options
def _do_format(options):
datapath = os.path.normpath(os.path.join(options['param_dir'],
options['data']))
out_path = os.path.splitext(datapath)[0] + "_formatted.csv"
format_type = options['analysis'].split('_')[1]
misc.data_read_write(datapath, out_path, format_type, **options)
def _do_analysis(options):
"""
Do analysis for a single run, as specified by options.
Parameters
----------
options : dict
Option names and values for analysis
"""
module = _function_location(options)
core_results = _call_analysis_function(options, module)
if module == 'emp' and ('models' in options.keys()):
fit_results = _fit_models(options, core_results)
else:
fit_results = None
_save_results(options, module, core_results, fit_results)
def _function_location(options):
# TODO: Add spec and misc modules
# This relies on the assumption that there are no duplicate member names
# in the different modules.
func_name = options['analysis'].split('.')[0] # Ignore method if present
emp_members = [x[0] for x in inspect.getmembers(emp)]
mod_members = [x[0] for x in inspect.getmembers(mod)]
if func_name in emp_members:
module = 'emp'
elif func_name in mod_members:
module = 'mod'
else:
raise ValueError, ("No analysis of type '%s' is available" %
options['analysis'])
return module
def _call_analysis_function(options, module):
"""
Call function from module and get result, using inputs from options
Parameters
----------
options : dict
Option names and values for analysis
module : str
Short name of module within macroeco containing analysis function
Returns
-------
dataframe, array, value, list of tuples
        Functions from the emp module return a list of tuples in which the
        first element of each tuple is a string describing the result and the
        second element is the result of the analysis as a dataframe.
Functions in other modules return dataframe, array, or value.
"""
args, kwargs = _get_args_kwargs(options, module)
return eval("%s.%s(*args, **kwargs)" % (module, options['analysis']))
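# Illustrative example of the call built above: with module == 'emp' and
# options['analysis'] == 'sad', the eval amounts to emp.sad(*args, **kwargs),
# with args/kwargs pulled out of the options dict by _get_args_kwargs below.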
def _get_args_kwargs(options, module):
"""
    Given an options dict (including the analysis name) and a module, extract args and kwargs
"""
if module == 'emp':
options = _emp_extra_options(options)
arg_names, kw_names = _arg_kwarg_lists(module, options['analysis'])
# Create list of values for arg_names
args = []
for arg_name in arg_names:
if arg_name == 'patch': # For patch arg, append actual patch obj
args.append(options['patch'])
continue
if arg_name == 'self': # Ignore self from class methods
continue
if arg_name == 'k': # scipy dists use k and x, we always use x
arg_name = 'x'
try:
exec 'args.append(eval("%s"))' % options[arg_name]
except SyntaxError: # eval failing because option is a string
args.append(options[arg_name])
except:
raise ValueError, ("Value for required argument %s not provided"
% arg_name)
# Create dict with vals for kw_names
kwargs = {}
for kw_name in kw_names:
if kw_name in options.keys(): # If a value is given for this kwarg
try:
exec 'kwargs[kw_name] = eval("%s")' % options[kw_name]
except SyntaxError: # eval failing because value is a string
kwargs[kw_name] = options[kw_name]
except:
raise ValueError, ("Value for optional argument %s is invalid"
% kw_name)
return args, kwargs
def _emp_extra_options(options):
"""
    Get special options patch, cols, and splits if the analysis is in the emp module
"""
# Check that metadata is valid
metadata_path = os.path.normpath(os.path.join(options['param_dir'],
options['metadata']))
if not os.path.isfile(metadata_path):
raise IOError, ("Path to metadata file %s is invalid." %
metadata_path)
options['metadata_path'] = metadata_path
# Using subset if given, create and store patch
subset = options.get('subset', '')
options['patch'] = emp.Patch(metadata_path, subset)
# If cols or splits not given in options, make empty strings
if 'cols' not in options.keys():
options['cols'] = ''
if 'splits' not in options.keys():
options['splits'] = ''
return options
def _arg_kwarg_lists(module, analysis):
# Get names of args and kwargs to method specified by analysis option
exec ("arg_and_kwd_names, _, _, kw_defaults = "
"inspect.getargspec(%s.%s)" % (module, analysis))
if kw_defaults: # If there are kwargs
arg_names = arg_and_kwd_names[:-len(kw_defaults)]
kw_names = arg_and_kwd_names[-len(kw_defaults):]
else: # If no kwargs
arg_names = arg_and_kwd_names
kw_names = []
# Inspection for rv classes doesn't work since it uses args internally
# Unless method is translate_args or fit_mle, appends shapes to args
try:
obj_meth = analysis.split('.')
if obj_meth[1] not in ['fit_mle', 'translate_args']:
arg_names += eval(module + '.' + obj_meth[0] + '.' +
"shapes.replace(' ','').split(',')")
if obj_meth[1] == 'rvs': # Inspection for size not working
kw_names.append('size')
except:
pass
return arg_names, kw_names
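# Illustrative example of the special-casing above: for an analysis string
# like 'lognorm.rvs', the distribution's shape names (assumed here to be
# 'mu' and 'sigma') are appended to arg_names and 'size' is added to
# kw_names, since inspection alone cannot recover them for rv classes.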
def _fit_models(options, core_results):
"""
Fit models to empirical result from a function in emp module
Parameters
----------
options : dict
Option names and values for analysis
core_results : list of tuples
Output of function in emp
Returns
-------
list of dicts
        Each element in the list corresponds to a subset. The dict has a key for
each model given in options, and the value is a list of fitted
parameters (tuple), values (array), comparison statistic names (list),
and comparison statistic values (list).
Notes
-----
To determine if the empirical result refers to a curve or a distribution,
the result dataframe is inspected for a column 'x', which indicates a
curve.
"""
logging.info("Fitting models")
models = options['models'].replace(' ', '').split(';')
# TODO: Make work for 2D results, i.e., curves, comm_sep, o_ring
# TODO: Make work for curves in general (check if 'x' present in core_res)
fit_results = []
for core_result in core_results: # Each subset
fit_result = {}
for model in models:
fits = _get_fits(core_result, model, options)
values = _get_values(core_result, model, fits)
stat_names, stats = _get_comparison_stat(core_result, values,
model, fits)
fit_result[model] = [fits, values, stat_names, stats]
fit_results.append(fit_result)
return fit_results
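# Sketch of the structure returned by _fit_models for one subset and one
# model; the model name and numbers are illustrative:
#
# fit_results = [
#     {'lognorm': [(2.1, 0.5),                 # fitted parameters (tuple)
#                  np.array([9.8, 4.9, 2.1]),  # predicted values (array)
#                  ['AIC'],                    # comparison statistic names
#                  [123.4]]},                  # comparison statistic values
# ]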
def _get_fits(core_result, model, options):
options_copy = {}
for key, val in options.iteritems():
if key not in ['patch']: # Ignore patch since won't deepcopy
options_copy[key] = copy.deepcopy(val)
model_obj = eval('mod.' + model)
if hasattr(model_obj, 'fit_mle'):
options_copy['analysis'] = model + '.' + 'fit_mle'
options_copy['data'] = core_result[1]['y'].values
else:
options_copy['analysis'] = model + '.' + 'fit_lsq'
options_copy['x'] = core_result[1]['x'].values
options_copy['y_obs'] = core_result[1]['y'].values
options_copy['df'] = core_result[1] # Entire result df, for mete_sar
return _call_analysis_function(options_copy, 'mod')
def _get_values(core_result, model, fits):
model_obj = eval('mod.' + model)
if hasattr(model_obj, 'vals'):
x = core_result[1]['x'].values # Calc model at x values
values = eval("mod.%s.vals(x, *fits)" % model)
else:
n = len(core_result[1]) # Calc model at data values
values = eval("mod.%s.rank(n, *fits)" % model)
return values
def _get_comparison_stat(core_result, values, model, fits):
# Uses AIC for distributions, R2 one-to-one for curves
try: # Only curves have vals
eval("mod.%s" % model + ".vals.__doc__")
obs = core_result[1]['y'].values
pred = values
name = ['R2']
stat = comp.r_squared(obs, pred, one_to_one=True)
except AttributeError:
obs = core_result[1]['y'].values
name = ['AIC']
stat = comp.AIC(obs, eval("mod.%s" % model + "(*fits)"))
return name, stat
def _save_results(options, module, core_results, fit_results):
"""
Save results of analysis as tables and figures
Parameters
----------
options : dict
Option names and values for analysis
module : str
Module that contained function used to generate core_results
core_results : dataframe, array, value, list of tuples
Results of main analysis
fit_results : list or None
Results of comparing emp analysis to models, None if not applicable
"""
logging.info("Saving all results")
# Use custom plot format
mpl.rcParams.update(misc.rcparams.ggplot_rc)
# Make run directory
os.makedirs(options['run_dir'])
# Write core results
_write_core_tables(options, module, core_results)
# Write additional results if analysis from emp
if module == 'emp':
_write_subset_index_file(options, core_results)
# Write model/data comparison if models were given
if fit_results:
models = options['models'].replace(' ','').split(';')
for i, core_result in enumerate(core_results):
_write_fitted_params(i, models, options, fit_results)
_write_test_statistics(i, models, options, fit_results)
_write_comparison_plot_table(i, models, options,
core_results, fit_results)
def _write_core_tables(options, module, core_results):
"""
Notes
-----
Depending on function that was called for analysis, core_results may be a
list of tuples (empirical), a dataframe, an array, or a single value.
For the list of tuples from empirical, the second element of each tuple is
the raw result, and we write them all with the appropriate prefix. For
dataframes, we write them. For arrays or single values, we convert to data
frames and write them.
"""
table_name = 'core_result.csv'
single_file_path = os.path.join(options['run_dir'], table_name)
if module == 'emp': # List of tuples
for i, core_result in enumerate(core_results):
file_path = _get_file_path(i, options, table_name)
core_result[1].to_csv(file_path, index=False, float_format='%.4f')
elif type(core_results) == type(pd.DataFrame()): # DataFrame
core_results.to_csv(single_file_path, index=False, float_format='%.4f')
else: # Array or single value (atleast_1d corrects for unsized array)
df = pd.DataFrame({'y': np.atleast_1d(core_results)})
df.to_csv(single_file_path, index=False, float_format='%.4f')
def _get_file_path(spid, options, file_name):
return os.path.join(options['run_dir'],
'%i_%s' % (spid+1, file_name))
def _write_subset_index_file(options, core_results):
"""
Write table giving index of subsets, giving number and subset string
"""
f_path = os.path.join(options['run_dir'], '_subset_index.csv')
subset_strs = zip(*core_results)[0]
index = np.arange(len(subset_strs)) + 1
df = pd.DataFrame({'subsets': subset_strs}, index=index)
df.to_csv(f_path)
def _write_fitted_params(spid, models, options, fit_results):
# TODO: Consider converting to pandas, need to deal with variable length
# TODO: Possibility - empty data frame max length, max width = nparams
f = open(_get_file_path(spid, options, 'fitted_params.csv'), 'w')
f.write("Model, Fit Parameters\n")
for model in models:
fit_result = fit_results[spid][model]
mod_fits = str(fit_result[0])[1:-1] # Drop parens around tuple
f.write("%s,%s\n" % (model, mod_fits))
f.close()
def _write_test_statistics(spid, models, options, fit_results):
# TODO: Add delta test statistics columns
# TODO: Make dataframe?
f = open(_get_file_path(spid, options, 'test_statistics.csv'), 'w')
# Gets stat name list from any element of result dict - same for all
stat_names_list = next(fit_results[spid].itervalues())[2]
stat_names_str = str(stat_names_list)[1:-1].strip("'")
f.write("Model, %s\n" % stat_names_str)
for model in models:
fit_result = fit_results[spid][model]
fit_stats = str(fit_result[3])[:]
f.write("%s,%s\n" % (model, fit_stats))
f.close()
def _write_comparison_plot_table(spid, models, options, core_results,
fit_results):
"""
Notes
-----
Only applies to analysis using functions from empirical in which models are
also given.
"""
# TODO: Clean up sorting, may not work if SAR x out of order, e.g.
is_curve = 'x' in core_results[0][1]
df = core_results[spid][1]
df.rename(columns={'y': 'empirical'}, inplace=True)
# If distribution, need to sort values so will match sorted rank in fits
if not is_curve:
x = np.arange(len(df)) + 1
df = df.sort(columns='empirical')
df.insert(0, 'x', x[::-1])
# Add residual column for each model
for model in models:
fit_result = fit_results[spid][model]
df[model] = fit_result[1]
df[model + "_residual"] = df[model] - df['empirical']
# If curve, sort now for plotting purposes
if is_curve:
df = df.sort(columns='x')
# Set up file paths
f_path = _get_file_path(spid, options, 'data_models.csv')
p_path = _get_file_path(spid, options, 'data_models.pdf')
# Save table
df.to_csv(f_path, index=False, float_format='%.4f') # Table
# Save plot
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(df['x'], df['empirical'], color='k')
ax1.plot(df['x'], df[models])
ax1.legend(models + ['empirical'], loc='best')
ax1.set_xlabel('x')
ax1.set_ylabel('value')
ax2.hlines(0, np.min(df['x']), np.max(df['x']))
ax2.plot(df['x'], df[[x + '_residual' for x in models]])
ax2.legend(models + ['empirical'], loc='best')
ax2.set_xlabel('x')
ax2.set_ylabel('residual')
ax2.set_xlim(ax1.get_xlim())
ax2.set_ylim(min(ax2.get_ylim()[0], -1), max(ax2.get_ylim()[1], 1))
if options.get('log_y', None):
ax1.set_yscale('log')
ax2.set_yscale('symlog', linthreshy=1)
if options.get('log_x', None):
ax1.set_xscale('log')
ax2.set_xscale('log')
if not options.get('log_x', None) and not options.get('log_y', None):
ax1.set_ylim(bottom=0)
ax1.set_xlim(left=0)
ax1 = _pad_plot_frame(ax1)
ax2 = _pad_plot_frame(ax2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fig.tight_layout()
fig.savefig(p_path)
plt.close('all')
def _pad_plot_frame(ax, pad=0.01):
"""
Provides padding on sides of frame equal to pad fraction of plot
"""
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
xr = xmax - xmin
yr = ymax - ymin
ax.set_xlim(xmin - xr*pad, xmax + xr*pad)
ax.set_ylim(ymin - yr*pad, ymax + yr*pad)
return ax
def _output_cdf_plot(core_result, spid, models, options, fit_results):
"""Function for plotting cdf"""
# CDF
x = core_result['y'].values
df = emp.empirical_cdf(x)
df.columns = ['x', 'empirical']
def calc_func(model, df, shapes):
return eval("mod.%s.cdf(df['x'], *shapes)" % model)
plot_exec_str = "ax.step(df['x'], emp, color='k', lw=3);ax.set_ylim(top=1)"
_save_table_and_plot(spid, models, options, fit_results, 'data_pred_cdf',
df, calc_func, plot_exec_str)
def output_pdf_plot(core_result, spid, models, options, fit_results):
""" Function for plotting pdf/pmf """
# PDF/PMF
hist_bins = 11
emp_hist, edges = np.histogram(core_result['y'].values, hist_bins,
normed=True)
x = (np.array(edges[:-1]) + np.array(edges[1:])) / 2
df = pd.DataFrame({'x': x, 'empirical': emp_hist})
def calc_func(model, df, shapes):
try:
return eval("mod.%s.pmf(np.floor(df['x']), *shapes)" % model)
except:
return eval("mod.%s.pdf(df['x'], *shapes)" % model)
plot_exec_str = "ax.bar(df['x']-width/2, emp, width=width, color='gray')"
_save_table_and_plot(spid, models, options, fit_results, 'data_pred_pdf',
df, calc_func, plot_exec_str)
|
|
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module defines some rewritings for pySMT formulae.
"""
from pysmt.walkers.dag import DagWalker
import pysmt.typing as types
import pysmt.operators as op
import pysmt.environment
class CNFizer(DagWalker):
THEORY_PLACEHOLDER = "__Placeholder__"
TRUE_CNF = frozenset()
FALSE_CNF = frozenset([frozenset()])
def __init__(self, environment=None):
DagWalker.__init__(self, environment)
self.mgr = self.env.formula_manager
self._introduced_variables = {}
self._cnf_pieces = {}
def _key_var(self, formula):
if formula in self._introduced_variables:
res = self._introduced_variables[formula]
else:
res = self.mgr.FreshSymbol()
self._introduced_variables[formula] = res
return res
def convert(self, formula):
"""Convert formula into an Equisatisfiable CNF.
Returns a set of clauses: a set of set of literals.
"""
tl, _cnf = self.walk(formula)
res = [frozenset([tl])]
for clause in _cnf:
if len(clause) == 0:
return CNFizer.FALSE_CNF
simp = []
for lit in clause:
if lit.is_true():
# Prune clauses that are trivially TRUE
simp = None
break
elif not lit.is_false():
# Prune FALSE literals
simp.append(lit)
if simp:
res.append(frozenset(simp))
return frozenset(res)
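    # Illustrative shape of the result, where k1 and k2 stand for the fresh
    # Tseitin variables introduced by _key_var: converting Or(a, And(b, c))
    # yields something like
    #   frozenset({frozenset({k1}),
    #              frozenset({Not(k1), a, k2}),
    #              frozenset({Not(k2), b}), frozenset({Not(k2), c}), ...})
    # i.e. a frozenset of clauses, each clause a frozenset of literals,
    # equisatisfiable with the input formula.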
def convert_as_formula(self, formula):
"""Convert formula into an Equisatisfiable CNF.
Returns an FNode.
"""
lsts = self.convert(formula)
conj = []
for clause in lsts:
conj.append(self.mgr.Or(clause))
return self.mgr.And(conj)
def printer(self, _cnf):
print(self.serialize(_cnf))
return
def serialize(self, _cnf):
clauses = []
for clause in _cnf:
clauses +=[" { " + " ".join(str(lit) for lit in clause) + "} "]
res = ["{"] + clauses + ["}"]
return "".join(res)
def walk_forall(self, formula, args, **kwargs):
raise NotImplementedError("CNFizer does not support quantifiers")
def walk_exists(self, formula, args, **kwargs):
raise NotImplementedError("CNFizer does not support quantifiers")
def walk_and(self, formula, args, **kwargs):
if len(args) == 1:
return args[0]
k = self._key_var(formula)
_cnf = [frozenset([k] + [self.mgr.Not(a).simplify() for a,_ in args])]
for a,c in args:
_cnf.append(frozenset([a, self.mgr.Not(k)]))
for clause in c:
_cnf.append(clause)
return k, frozenset(_cnf)
def walk_or(self, formula, args, **kwargs):
if len(args) == 1:
return args[0]
k = self._key_var(formula)
_cnf = [frozenset([self.mgr.Not(k)] + [a for a,_ in args])]
for a,c in args:
_cnf.append(frozenset([k, self.mgr.Not(a)]))
for clause in c:
_cnf.append(clause)
return k, frozenset(_cnf)
def walk_not(self, formula, args, **kwargs):
a, _cnf = args[0]
if a.is_true():
return self.mgr.FALSE(), CNFizer.TRUE_CNF
elif a.is_false():
return self.mgr.TRUE(), CNFizer.TRUE_CNF
else:
k = self._key_var(formula)
return k, _cnf | frozenset([frozenset([self.mgr.Not(k),
self.mgr.Not(a).simplify()]),
frozenset([k, a])])
def walk_implies(self, formula, args, **kwargs):
a, cnf_a = args[0]
b, cnf_b = args[1]
k = self._key_var(formula)
not_a = self.mgr.Not(a).simplify()
not_b = self.mgr.Not(b).simplify()
return k, (cnf_a | cnf_b | frozenset([frozenset([not_a, b, k]),
frozenset([a, k]),
frozenset([not_b, k])]))
def walk_iff(self, formula, args, **kwargs):
a, cnf_a = args[0]
b, cnf_b = args[1]
k = self._key_var(formula)
not_a = self.mgr.Not(a).simplify()
not_b = self.mgr.Not(b).simplify()
not_k = self.mgr.Not(k)
return k, (cnf_a | cnf_b | frozenset([frozenset([not_a, not_b, k]),
frozenset([not_a, b, not_k]),
frozenset([a, not_b, not_k]),
frozenset([a, b, k])]))
def walk_symbol(self, formula, **kwargs):
if formula.is_symbol(types.BOOL):
return formula, CNFizer.TRUE_CNF
else:
return CNFizer.THEORY_PLACEHOLDER
def walk_function(self, formula, **kwargs):
ty = formula.function_symbol().symbol_type()
if ty.return_type.is_bool_type():
return formula, CNFizer.TRUE_CNF
else:
return CNFizer.THEORY_PLACEHOLDER
def walk_real_constant(self, formula, **kwargs):
return CNFizer.THEORY_PLACEHOLDER
    def walk_bool_constant(self, formula, **kwargs):
        # TRUE and FALSE constants contribute no clauses of their own.
        return formula, CNFizer.TRUE_CNF
def walk_int_constant(self, formula, **kwargs):
return CNFizer.THEORY_PLACEHOLDER
def walk_plus(self, formula, **kwargs):
return CNFizer.THEORY_PLACEHOLDER
def walk_minus(self, formula, **kwargs):
return CNFizer.THEORY_PLACEHOLDER
def walk_times(self, formula, **kwargs):
return CNFizer.THEORY_PLACEHOLDER
def walk_equals(self, formula, args, **kwargs):
assert all(a == CNFizer.THEORY_PLACEHOLDER for a in args)
return formula, CNFizer.TRUE_CNF
def walk_le(self, formula, args, **kwargs):
assert all(a == CNFizer.THEORY_PLACEHOLDER for a in args)
return formula, CNFizer.TRUE_CNF
def walk_lt(self, formula, args, **kwargs):
assert all(a == CNFizer.THEORY_PLACEHOLDER for a in args), str(args)
return formula, CNFizer.TRUE_CNF
def walk_ite(self, formula, args, **kwargs):
if any(a == CNFizer.THEORY_PLACEHOLDER for a in args):
return CNFizer.THEORY_PLACEHOLDER
else:
(i,cnf_i),(t,cnf_t),(e,cnf_e) = args
k = self._key_var(formula)
not_i = self.mgr.Not(i).simplify()
not_t = self.mgr.Not(t).simplify()
not_e = self.mgr.Not(e).simplify()
not_k = self.mgr.Not(k)
return k, (cnf_i | cnf_t | cnf_e |
frozenset([frozenset([not_i, not_t, k]),
frozenset([not_i, t, not_k]),
frozenset([i, not_e, k]),
frozenset([i, e, not_k])]))
def walk_toreal(self, formula, **kwargs):
return CNFizer.THEORY_PLACEHOLDER
class NNFizer(DagWalker):
"""Converts a formula into Negation Normal Form.
The conversion to NNF is handled in 3 steps.
1. The function _get_children is extended, so that for each
expression inside a Not, it will return the effect of propagating
the Not downwards. For example, for Not(And(a, b)), the function
will return [Not(a), Not(b)]. For expressions that are not inside
a Not, it is important to return the same type of arguments. See
for example the case for Iff.
2. The usual walk_* function is implemented to rebuild the
expression. This is called only if the subformula was not negated.
3. walk_not takes care of rebuilding all negated expressions. For
example, for Not(And(a, b)), walk_not will return
Or(Not(a), Not(b)). Notice that args in walk_not contains the
subexpressions returned by _get_children. In the above example,
walk_not will be called with args=[Not(a), Not(b)]. Therefore,
    walk_not only needs to change the And into an Or.
"""
def __init__(self, environment=None):
DagWalker.__init__(self, env=environment)
self.mgr = self.env.formula_manager
self.set_function(self.walk_theory_relation, *op.RELATIONS)
def convert(self, formula):
""" Converts the given formula in NNF """
return self.walk(formula)
def _get_children(self, formula):
"""Returns the arguments of the node on which an hypotetical recursion
would be made, possibly negating them.
"""
mgr = self.mgr
if formula.is_not():
s = formula.arg(0)
if s.is_not():
return [s.arg(0)]
elif s.is_and():
return [mgr.Not(x) for x in s.args()]
elif s.is_or():
return [mgr.Not(x) for x in s.args()]
elif s.is_implies():
return [s.arg(0), mgr.Not(s.arg(1))]
elif s.is_iff():
return [s.arg(0), s.arg(1),
mgr.Not(s.arg(0)),
mgr.Not(s.arg(1))]
elif s.is_quantifier():
return [mgr.Not(s.arg(0))]
else:
return [s]
elif formula.is_implies():
return [mgr.Not(formula.arg(0)), formula.arg(1)]
elif formula.is_iff():
return [formula.arg(0), formula.arg(1),
mgr.Not(formula.arg(0)),
mgr.Not(formula.arg(1))]
elif formula.is_and() or formula.is_or() or formula.is_quantifier():
return formula.args()
elif formula.is_ite():
# This must be a boolean ITE as we do not recur within
# theory atoms
assert self.env.stc.get_type(formula).is_bool_type()
i, t, e = formula.args()
return [i, mgr.Not(i), t, e]
elif formula.is_symbol():
return []
elif formula.is_bool_constant():
return []
else:
# This is a theory atom
assert formula.is_theory_relation(), str(formula)
return []
def walk_not(self, formula, args, **kwargs):
s = formula.arg(0)
if s.is_symbol():
return self.mgr.Not(s)
elif s.is_not():
return args[0]
elif s.is_and():
return self.mgr.Or(args)
elif s.is_or():
return self.mgr.And(args)
elif s.is_implies():
return self.mgr.And(args)
elif s.is_iff():
a, b, na, nb = args
return self.mgr.Or(self.mgr.And(a, nb),
self.mgr.And(b, na))
elif s.is_forall():
return self.mgr.Exists(s.quantifier_vars(), args[0])
elif s.is_exists():
return self.mgr.ForAll(s.quantifier_vars(), args[0])
else:
return self.mgr.Not(args[0])
def walk_implies(self, formula, args, **kwargs):
return self.mgr.Or(args)
def walk_iff(self, formula, args, **kwargs):
a, b, na, nb = args
return self.mgr.And(self.mgr.Or(na, b),
self.mgr.Or(nb, a))
def walk_and(self, formula, args, **kwargs):
return self.mgr.And(args)
def walk_or(self, formula, args, **kwargs):
return self.mgr.Or(args)
def walk_ite(self, formula, args, **kwargs):
# This must be a boolean ITE as we never add theory atoms in the stack
# See self._get_children()
assert self.env.stc.get_type(formula).is_bool_type()
i, ni, t, e = args
return self.mgr.And(self.mgr.Or(ni, t), self.mgr.Or(i, e))
def walk_forall(self, formula, args, **kwargs):
return self.mgr.ForAll(formula.quantifier_vars(), args[0])
def walk_exists(self, formula, args, **kwargs):
return self.mgr.Exists(formula.quantifier_vars(), args[0])
def walk_symbol(self, formula, **kwargs):
return formula
def walk_bool_constant(self, formula, **kwargs):
return formula
def walk_theory_relation(self, formula, **kwargs):
#pylint: disable=unused-argument
return formula
class PrenexNormalizer(DagWalker):
"""
This class traverses a formula and rebuilds it in prenex normal form.
"""
def __init__(self, env=None, invalidate_memoization=None):
DagWalker.__init__(self,
env=env,
invalidate_memoization=invalidate_memoization)
self.mgr = self.env.formula_manager
self.check_symbol = self.mgr.FreshSymbol(types.BOOL)
# The walker returns a pair (L, m) where m is a
# quantifier-free formula (the matrix) and L is a list of
# pairs (Q, vars) where Q is either mgr.Exists or mgr.ForAll
# and vars is a frozenset of variables. The semantics is that
# the input formula is equivalent to res computed as follows:
# res = m
# for Q, vars in L:
# res = Q(vars, res)
self.set_function(self.walk_error, *op.ALL_TYPES)
self.set_function(self.walk_quantifier, *op.QUANTIFIERS)
self.set_function(self.walk_theory_op, *op.BV_OPERATORS)
self.set_function(self.walk_constant, *op.CONSTANTS)
self.set_function(self.walk_theory_relation, *op.RELATIONS)
self.set_function(self.walk_theory_op, *op.LIRA_OPERATORS)
self.set_function(self.walk_symbol, op.SYMBOL)
self.set_function(self.walk_function, op.FUNCTION)
self.set_function(self.walk_ite, op.ITE)
self.set_function(self.walk_conj_disj, op.AND, op.OR)
self.set_function(self.walk_not, op.NOT)
self.set_function(self.walk_iff, op.IFF)
self.set_function(self.walk_implies, op.IMPLIES)
def normalize(self, formula):
quantifiers, matrix = self.walk(formula)
res = matrix
for Q, qvars in quantifiers:
res = Q(qvars, res)
return res
def _invert_quantifier(self, Q):
if Q == self.mgr.Exists:
return self.mgr.ForAll
return self.mgr.Exists
def walk_symbol(self, formula, **kwargs):
if formula.symbol_type().is_bool_type():
return [],formula
return None
def walk_constant(self, formula, **kwargs):
#pylint: disable=unused-argument
if formula.is_bool_constant():
return [],formula
return None
def walk_conj_disj(self, formula, args, **kwargs):
#pylint: disable=unused-argument
# Hold the final result
quantifiers = []
matrix = []
# A set of variables that are already reserved in the final
# matrix. If we find a quantifier over a variable in this set
# we need to alpha-rename before adding the quantifier to the
# final list and accumulate the matrix.
reserved = formula.get_free_variables()
# We iterate to each argument, each could have a sequence of
# quantifiers that we need to merge
for sub_quantifiers, sub_matrix in args:
# For each quantifier in the alternation
for Q, q_vars in sub_quantifiers:
# These are the variables that need alpha-renaming
needs_rename = q_vars & reserved
if len(needs_rename) > 0:
# we need alpha-renaming: prepare the substitution map
sub = {v : self.mgr.FreshSymbol(v.symbol_type())
for v in needs_rename}
sub_matrix = sub_matrix.substitute(sub)
# The new variables for this quantifiers will be
# its old variables, minus the one needing
# renaming, that are renamed.
new_q_vars = (q_vars - needs_rename)
new_q_vars |= set(sub[x] for x in needs_rename)
else:
# No need to alpha-rename this quantifier, we keep
# as it is the set of variables.
new_q_vars = set(q_vars)
# Store this quantifier in the final result
quantifiers.append((Q, new_q_vars))
# The variables of this quantifier from now on are
# reserved, if another quantifier uses any of them it
# will need alpha-renaming even if this quantifier was
# OK.
reserved |= new_q_vars
# Store the (possibly rewritten) sub_matrix
matrix.append(sub_matrix)
# Build and return the result
if formula.is_and():
return (quantifiers, self.mgr.And(matrix))
if formula.is_or():
return (quantifiers, self.mgr.Or(matrix))
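    # Illustrative example of the alpha-renaming above: for
    # And(Exists([x], P(x)), Exists([x], Q(x))), the first quantifier keeps x
    # and marks it reserved, so the second Exists over x is rewritten over a
    # fresh x1, giving the prefix [(Exists, {x}), (Exists, {x1})] and the
    # matrix And(P(x), Q(x1)).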
def walk_not(self, formula, args, **kwargs):
quantifiers, matrix = args[0]
nq = [(self._invert_quantifier(Q), qvars) for Q, qvars in quantifiers]
return (nq, self.mgr.Not(matrix))
def walk_iff(self, formula, args, **kwargs):
a, b = formula.args()
i1 = self.mgr.Implies(a, b)
i2 = self.mgr.Implies(b, a)
i1_args = self.walk_implies(i1, [args[0], args[1]])
i2_args = self.walk_implies(i2, [args[1], args[0]])
return self.walk_conj_disj(self.mgr.And(i1, i2), [i1_args, i2_args])
def walk_implies(self, formula, args, **kwargs):
a, b = formula.args()
na = self.mgr.Not(a)
na_arg = self.walk_not(na, [args[0]])
return self.walk_conj_disj(self.mgr.Or(na, b), [na_arg, args[1]])
def walk_ite(self, formula, args, **kwargs):
if any(a is None for a in args):
return None
else:
i, t, e = formula.args()
i_args, t_args, e_args = args
ni = self.mgr.Not(i)
i1 = self.mgr.Implies(i, t)
i2 = self.mgr.Implies(ni, e)
ni_args = self.walk_not(ni, [i_args])
i1_args = self.walk_implies(i1, [i_args, t_args])
i2_args = self.walk_implies(i2, [ni_args, e_args])
return self.walk_conj_disj(self.mgr.And(i1, i2), [i1_args, i2_args])
def walk_theory_relation(self, formula, **kwargs):
#pylint: disable=unused-argument
return [], formula
def walk_quantifier(self, formula, args, **kwargs):
#pylint: disable=unused-argument
quantifiers, matrix = args[0]
qvars = set(v for _, qv in quantifiers for v in qv)
nq = set(formula.quantifier_vars()) - qvars
# If nq is empty, it means that inner quantifiers shadow all
# the variables of this quantifier. Hence this quantifier can
# be removed.
if len(nq) > 0:
if formula.is_exists():
return (quantifiers + [(self.mgr.Exists, nq)]), matrix
else:
return (quantifiers + [(self.mgr.ForAll, nq)]), matrix
return quantifiers, matrix
def walk_theory_op(self, formula, **kwargs):
#pylint: disable=unused-argument
return None
def walk_function(self, formula, **kwargs):
if formula.function_name().symbol_type().return_type.is_bool_type():
return [], formula
return None
class AIGer(DagWalker):
"""Converts a formula into an And-Inverted-Graph."""
def __init__(self, environment=None):
DagWalker.__init__(self, env=environment)
self.mgr = self.env.formula_manager
self.set_function(self.walk_nop, *op.RELATIONS)
self.set_function(self.walk_nop, *op.THEORY_OPERATORS)
self.set_function(self.walk_nop, *op.CONSTANTS)
self.set_function(self.walk_nop, op.SYMBOL, op.FUNCTION)
self.set_function(self.walk_quantifier, *op.QUANTIFIERS)
def convert(self, formula):
""" Converts the given formula in AIG """
return self.walk(formula)
def walk_nop(self, formula, args, **kwargs):
"""We return the Theory subformulae without changes."""
return formula
def walk_quantifier(self, formula, args, **kwargs):
"""Recreate the quantifiers, with the rewritten subformula."""
if formula.is_exists():
return self.mgr.Exists(formula.quantifier_vars(),
args[0])
else:
assert formula.is_forall()
return self.mgr.ForAll(formula.quantifier_vars(),
args[0])
def walk_and(self, formula, args, **kwargs):
return self.mgr.And(*args)
def walk_not(self, formula, args, **kwargs):
return self.mgr.Not(args[0])
def walk_or(self, formula, args, **kwargs):
""" a1 | ... | an = !( !a1 & ... & !an) """
return self.mgr.Not(self.mgr.And(self.mgr.Not(s) for s in args))
def walk_iff(self, formula, args, **kwargs):
""" a <-> b = (!a | b) & (!b | a) = !( a & !b ) & !(b & !a)"""
lhs, rhs = args
r1 = self.mgr.Not(self.mgr.And(lhs, self.mgr.Not(rhs)))
r2 = self.mgr.Not(self.mgr.And(rhs, self.mgr.Not(lhs)))
return self.mgr.And(r1,r2)
def walk_implies(self, formula, args, **kwargs):
""" a -> b = !(a & !b) """
lhs, rhs = args
return self.mgr.Not(self.mgr.And(lhs, self.mgr.Not(rhs)))
def walk_ite(self, formula, args, **kwargs):
"""This rewrites only boolean ITE, not theory ones.
x ? a: b = (x -> a) & (!x -> b) = !(x & !a) & !(!x & !b)
"""
i, t, e = args
if self.env.stc.get_type(t).is_bool_type():
r1 = self.mgr.Not(self.mgr.And(i, self.mgr.Not(t)))
r2 = self.mgr.Not(self.mgr.And(self.mgr.Not(i),
self.mgr.Not(e)))
return self.mgr.And(r1, r2)
else:
return formula
def nnf(formula, environment=None):
"""Converts the given formula in NNF"""
nnfizer = NNFizer(environment)
return nnfizer.convert(formula)
def cnf(formula, environment=None):
"""Converts the given formula in CNF represented as a formula"""
cnfizer = CNFizer(environment)
return cnfizer.convert_as_formula(formula)
def cnf_as_set(formula, environment=None):
"""Converts the given formula in CNF represented as a set of sets"""
cnfizer = CNFizer(environment)
return cnfizer.convert(formula)
def prenex_normal_form(formula, environment=None):
"""Converts the given formula in NNF"""
normalizer = PrenexNormalizer(environment)
return normalizer.normalize(formula)
def aig(formula, environment=None):
"""Converts the given formula in AIG"""
aiger = AIGer(environment)
return aiger.convert(formula)
def conjunctive_partition(formula):
""" Returns a generator over the top-level conjuncts of the given formula
The result is such that for every formula phi, the following holds:
phi <-> And(conjunctive_partition(phi))
"""
to_process = [formula]
seen = set()
while to_process:
cur = to_process.pop()
if cur not in seen:
seen.add(cur)
if cur.is_and():
to_process += cur.args()
else:
yield cur
def disjunctive_partition(formula):
""" Returns a generator over the top-level disjuncts of the given formula
The result is such that for every formula phi, the following holds:
    phi <-> Or(disjunctive_partition(phi))
"""
to_process = [formula]
seen = set()
while to_process:
cur = to_process.pop()
if cur not in seen:
seen.add(cur)
if cur.is_or():
to_process += cur.args()
else:
yield cur
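# A minimal usage sketch for the helpers above; the formula is illustrative
# and assumes pysmt.shortcuts is importable in this environment:
def _rewritings_demo():
    from pysmt.shortcuts import Symbol, And, Or, Not, Implies
    a, b, c = (Symbol(name) for name in ("a", "b", "c"))
    phi = Not(Implies(And(a, b), c))
    print(nnf(phi))    # negations pushed down to the literals
    print(cnf(phi))    # equisatisfiable CNF, rebuilt as a single formula
    print(list(conjunctive_partition(And(a, Or(b, c)))))  # a and Or(b, c)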
|
|
"""
@package mi.dataset.driver.moas.gl.engineering.test.test_driver
@file marine-integrations/mi/dataset/driver/moas/gl/engineering/test/test_driver.py
@author Bill French, Nick Almonte
@brief Test cases for glider engineering data
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from exceptions import Exception
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.idk.exceptions import SampleTimeout
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.parser.glider import DataParticleType
from mi.dataset.parser.glider import EngineeringTelemeteredDataParticle, EngineeringScienceTelemeteredDataParticle
from mi.dataset.parser.glider import EngineeringRecoveredDataParticle, EngineeringScienceRecoveredDataParticle
from mi.dataset.parser.glider import EngineeringMetadataDataParticle, EngineeringMetadataRecoveredDataParticle
from mi.dataset.driver.moas.gl.engineering.driver import EngineeringDataSetDriver
from mi.dataset.driver.moas.gl.engineering.driver import DataTypeKey
TELEMETERED_TEST_DIR = '/tmp/engTelemeteredTest'
RECOVERED_TEST_DIR = '/tmp/engRecoveredTest'
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.moas.gl.engineering.driver',
driver_class="EngineeringDataSetDriver",
agent_resource_id='123xyz',
agent_name='Agent007',
agent_packet_config=EngineeringDataSetDriver.stream_config(),
startup_config={
DataSourceConfigKey.RESOURCE_ID: 'eng',
DataSourceConfigKey.HARVESTER:
{
DataTypeKey.ENG_TELEMETERED:
{
DataSetDriverConfigKeys.DIRECTORY: TELEMETERED_TEST_DIR,
DataSetDriverConfigKeys.STORAGE_DIRECTORY: '/tmp/stored_engTelemeteredTest',
DataSetDriverConfigKeys.PATTERN: '*.mrg',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataTypeKey.ENG_RECOVERED:
{
DataSetDriverConfigKeys.DIRECTORY: RECOVERED_TEST_DIR,
DataSetDriverConfigKeys.STORAGE_DIRECTORY: '/tmp/stored_engRecoveredTest',
DataSetDriverConfigKeys.PATTERN: '*.mrg',
DataSetDriverConfigKeys.FREQUENCY: 1,
}
},
DataSourceConfigKey.PARSER: {
DataTypeKey.ENG_TELEMETERED: {}, DataTypeKey.ENG_RECOVERED: {}
}
}
)
###############################################################################
# UNIT TESTS #
# Device specific unit tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
#
# INTEGRATION TESTS FOR ENGINEERING & SCIENCE DATA PARTICLE
#
## DONE
def test_get(self):
"""
Test that we can get data from files. Verify that the driver sampling
can be started and stopped.
"""
# Start sampling and watch for an exception
self.driver.start_sampling()
log.debug("started sampling")
self.clear_async_data()
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
# Results file, Number of Particles (rows) expected to compare, timeout
self.assert_data((EngineeringMetadataDataParticle, EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
'single_glider_record-engDataOnly.mrg.result.yml', count=3, timeout=10)
self.clear_async_data()
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
# Results file, Number of Particles (rows) expected to compare, timeout
self.assert_data((EngineeringMetadataRecoveredDataParticle, EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
'single_glider_record_recovered-engDataOnly.mrg.result.yml', count=3, timeout=10)
self.clear_async_data()
self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Results file, Number of Particles (rows) expected to compare, timeout
self.assert_data((EngineeringMetadataDataParticle, EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
'multiple_glider_record-engDataOnly.mrg.result.yml', count=9, timeout=10)
self.clear_async_data()
self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Results file, Number of Particles (rows) expected to compare, timeout
self.assert_data((EngineeringMetadataRecoveredDataParticle, EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
'multiple_glider_record_recovered-engDataOnly.mrg.result.yml', count=9, timeout=10)
# log.debug("IntegrationTest.test_get(): Start second file ingestion - Telemetered")
self.clear_async_data()
self.create_sample_data_set_dir('unit_247_2012_051_0_0-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-unit_247_2012_051_0_0-engDataOnly.mrg")
self.assert_data((EngineeringMetadataDataParticle, EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
count=101, timeout=30)
# log.debug("IntegrationTest.test_get(): Start second file ingestion - Recovered")
self.clear_async_data()
self.create_sample_data_set_dir('unit_247_2012_051_0_0-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-unit_247_2012_051_0_0-engDataOnly.mrg")
self.assert_data((EngineeringMetadataRecoveredDataParticle, EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
count=101, timeout=30)
##DONE
def test_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
path_1 = self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
path_1a = self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
path_2 = self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
path_2a = self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Create and store the new driver state
state = {
DataTypeKey.ENG_TELEMETERED: {
'CopyOf-single_glider_record-engDataOnly.mrg': self.get_file_state(path_1, True, 1160),
'CopyOf-multiple_glider_record-engDataOnly.mrg': self.get_file_state(path_1a, False, 10895)
},
DataTypeKey.ENG_RECOVERED: {
'CopyOf-single_glider_record-engDataOnly.mrg': self.get_file_state(path_2, True, 1160),
'CopyOf-multiple_glider_record-engDataOnly.mrg': self.get_file_state(path_2a, False, 12593)
}
}
log.debug(" ############################# TEST_STOP_RESUME - State = %s", state)
state[DataTypeKey.ENG_TELEMETERED]['CopyOf-single_glider_record-engDataOnly.mrg']['parser_state']['sent_metadata'] = True
state[DataTypeKey.ENG_TELEMETERED]['CopyOf-multiple_glider_record-engDataOnly.mrg']['parser_state']['sent_metadata'] = False
state[DataTypeKey.ENG_RECOVERED]['CopyOf-single_glider_record-engDataOnly.mrg']['parser_state']['sent_metadata'] = True
state[DataTypeKey.ENG_RECOVERED]['CopyOf-multiple_glider_record-engDataOnly.mrg']['parser_state']['sent_metadata'] = True
self.driver = self._get_driver_object(memento=state)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced for telemetered particles
self.assert_data((EngineeringMetadataDataParticle,
EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
'merged_glider_record-engDataOnly.mrg.result.yml', count=7, timeout=10)
        # verify data is produced for recovered particles - parse last two rows of data, reuse yml from bad_sample...
self.assert_data((EngineeringMetadataRecoveredDataParticle,
EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
'bad_sample_engineering_record_recovered.mrg.result.yml', count=4, timeout=10)
##DONE
def test_stop_start_ingest_telemetered(self):
"""
Test the ability to stop and restart sampling, and ingesting files in the correct order
"""
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"xCopyOf-multiple_glider_record-engDataOnly.mrg")
self.assert_data((EngineeringMetadataDataParticle, EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
'single_glider_record-engDataOnly.mrg.result.yml', count=3, timeout=10)
self.assert_file_ingested("CopyOf-single_glider_record-engDataOnly.mrg", DataTypeKey.ENG_TELEMETERED)
self.assert_file_not_ingested("xCopyOf-multiple_glider_record-engDataOnly.mrg")
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data((EngineeringMetadataDataParticle, EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
'multiple_glider_record-engDataOnly.mrg.result.yml', count=9, timeout=10)
self.assert_file_ingested("xCopyOf-multiple_glider_record-engDataOnly.mrg", DataTypeKey.ENG_TELEMETERED)
def test_stop_start_ingest_recovered(self):
"""
Test the ability to stop and restart sampling, and ingesting files in the correct order
"""
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"xCopyOf-multiple_glider_record-engDataOnly.mrg")
self.assert_data((EngineeringMetadataRecoveredDataParticle, EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
'single_glider_record_recovered-engDataOnly.mrg.result.yml', count=3, timeout=10)
self.assert_file_ingested("CopyOf-single_glider_record-engDataOnly.mrg", DataTypeKey.ENG_RECOVERED)
self.assert_file_not_ingested("xCopyOf-multiple_glider_record-engDataOnly.mrg")
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data((EngineeringMetadataRecoveredDataParticle, EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
'multiple_glider_record_recovered-engDataOnly.mrg.result.yml', count=9, timeout=10)
self.assert_file_ingested("xCopyOf-multiple_glider_record-engDataOnly.mrg", DataTypeKey.ENG_RECOVERED)
##DONE
def test_bad_sample_telemetered(self):
"""
Test a bad sample. To do this we set a state to the middle of a record
"""
# create some data to parse
self.clear_async_data()
path = self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
path_2 = self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Create and store the new driver state
state = {
DataTypeKey.ENG_TELEMETERED: {
'CopyOf-multiple_glider_record-engDataOnly.mrg': self.get_file_state(path, False, 12593)
},
DataTypeKey.ENG_RECOVERED: {
'CopyOf-multiple_glider_record-engDataOnly.mrg': self.get_file_state(path_2, False, 12593)
}
}
state[DataTypeKey.ENG_TELEMETERED]['CopyOf-multiple_glider_record-engDataOnly.mrg']['parser_state']['sent_metadata'] = True
self.driver = self._get_driver_object(memento=state)
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data((EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
'bad_sample_engineering_record.mrg.result.yml', count=2, timeout=10)
##DONE
def test_bad_sample_recovered(self):
"""
Test a bad sample. To do this we set a state to the middle of a record
"""
# create some data to parse
self.clear_async_data()
path = self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
path_2 = self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Create and store the new driver state
state = {
DataTypeKey.ENG_TELEMETERED: {
'CopyOf-multiple_glider_record-engDataOnly.mrg': self.get_file_state(path, False, 12593)
},
DataTypeKey.ENG_RECOVERED: {
'CopyOf-multiple_glider_record-engDataOnly.mrg': self.get_file_state(path_2, False, 12593)
}
}
state[DataTypeKey.ENG_RECOVERED]['CopyOf-multiple_glider_record-engDataOnly.mrg']['parser_state']['sent_metadata'] = True
self.driver = self._get_driver_object(memento=state)
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data((EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
'bad_sample_engineering_record_recovered.mrg.result.yml', count=4, timeout=10)
##DONE
def test_sample_exception_telemetered(self):
"""
test that a file is marked as parsed if it has a sample exception (which will happen with an empty file)
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester'][DataTypeKey.ENG_TELEMETERED]['pattern']
filename = config.replace("*", "foo")
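# Illustrative example: a harvester pattern such as '*.mrg' would yield the
# test filename 'foo.mrg' here (the actual pattern comes from the driver config).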
self.create_sample_data_set_dir(filename, TELEMETERED_TEST_DIR)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename, DataTypeKey.ENG_TELEMETERED)
##DONE
def test_sample_exception_recovered(self):
"""
test that a file is marked as parsed if it has a sample exception (which will happen with an empty file)
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester'][DataTypeKey.ENG_RECOVERED]['pattern']
filename = config.replace("*", "foo")
self.create_sample_data_set_dir(filename, RECOVERED_TEST_DIR)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename, DataTypeKey.ENG_RECOVERED)
##DONE
def test_fileopen_str_parse_telemetered(self):
"""
Test that we can parse a fileopen string that has a single digit day
replaced with an underscore.
"""
path = self.create_sample_data_set_dir('unit_363_2013_245_6_6.mrg', TELEMETERED_TEST_DIR)
# Start sampling
self.driver.start_sampling()
self.assert_data((EngineeringMetadataDataParticle, EngineeringScienceTelemeteredDataParticle,
EngineeringTelemeteredDataParticle),
None, count=153, timeout=30)
self.assert_file_ingested('unit_363_2013_245_6_6.mrg', DataTypeKey.ENG_TELEMETERED)
##DONE
def test_fileopen_str_parse_recovered(self):
"""
Test that we can parse a fileopen string that has a single digit day
replaced with an underscore.
"""
path = self.create_sample_data_set_dir('unit_363_2013_245_6_6.mrg', RECOVERED_TEST_DIR)
# Start sampling
self.driver.start_sampling()
self.assert_data((EngineeringMetadataRecoveredDataParticle, EngineeringScienceRecoveredDataParticle,
EngineeringRecoveredDataParticle),
None, count=153, timeout=30)
self.assert_file_ingested('unit_363_2013_245_6_6.mrg', DataTypeKey.ENG_RECOVERED)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
#
# QUAL TESTS FOR ENGINEERING & SCIENCE DATA PARTICLE
#
##DONE
def test_publish_path(self):
"""
Set up an agent/driver/harvester/parser and verify that data is
published out of the agent
"""
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
'CopyOf-single_glider_record-engDataOnly.mrg')
self.assert_initialize()
# Verify we get one sample
try:
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED)
# append result2 (the eng sci particle) to result 1 (the eng particle)
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_publish_path(): RESULT: %s", result0)
# Verify values in the combined result
self.assert_data_values(result0, 'single_glider_record-engDataOnly.mrg.result.yml')
except Exception as e:
log.error("## QualificationTest.test_publish_path(): Exception trapped: %s", e)
self.fail("Sample timeout.")
# Again for the recovered particles
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
RECOVERED_TEST_DIR,
'CopyOf-single_glider_record-engDataOnly.mrg')
# Verify we get one sample
try:
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_RECOVERED)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_RECOVERED)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA_RECOVERED)
# append result2 to result 1
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_publish_path(): RESULT: %s", result0)
# Verify values in the combined result
self.assert_data_values(result0, 'single_glider_record_recovered-engDataOnly.mrg.result.yml')
except Exception as e:
log.error("## QualificationTest.test_publish_path(): Exception trapped: %s", e)
self.fail("Sample timeout.")
##DONE
def test_separate_particles(self):
"""
Input file has eng particle data in the first two data rows but no eng_sci data, and the next (and last)
two rows have eng_sci data but no eng data. This test ensures the parser can deliver a single particle
of each type.
"""
self.create_sample_data_set_dir('eng_data_separate.mrg',
TELEMETERED_TEST_DIR,
'CopyOf-eng_data_separate.mrg')
self.assert_initialize()
# Verify we get one sample
try:
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA, 1)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED, 2)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 2)
# append result2 (the eng sci particle) to result 1 (the eng particle)
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_separate_particles(): RESULT: %s", result0)
# Verify values in the combined result
self.assert_data_values(result0, 'eng_data_separate.mrg.result.yml')
except Exception as e:
log.error("## QualificationTest.test_separate_particles(): Exception trapped: %s", e)
self.fail("## QualificationTest.test_separate_particles(): Sample timeout.")
##DONE
def test_large_import_telemetered(self):
"""
There is a bug where go_active times out when activating an instrument, and
there was speculation this was due to blocking behavior in the agent.
https://jira.oceanobservatories.org/tasks/browse/OOIION-1284
"""
self.create_sample_data_set_dir('unit_247_2012_051_0_0-engDataOnly.mrg', TELEMETERED_TEST_DIR)
self.assert_initialize()
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA, 1)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED, 100, 240)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 3, 240)
##DONE
def test_large_import_recovered(self):
"""
There is a bug where go_active times out when activating an instrument, and
there was speculation this was due to blocking behavior in the agent.
https://jira.oceanobservatories.org/tasks/browse/OOIION-1284
"""
self.create_sample_data_set_dir('unit_247_2012_051_0_0-engDataOnly.mrg', RECOVERED_TEST_DIR)
self.assert_initialize()
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA_RECOVERED, 1)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_RECOVERED, 100, 240)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_RECOVERED, 3, 240)
##DONE
def test_shutdown_restart_telemetered(self):
"""
Test the agent's ability to completely stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED)
# append result2 (the eng sci particle) to result 1 (the eng particle)
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_shutdown_restart(): RESULT: %s", result0)
# Verify values
self.assert_data_values(result0, 'single_glider_record-engDataOnly.mrg.result.yml')
###self.assert_all_queue_size_zero()
self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Now read the first three records of the second file then stop
result0 = self.get_samples(DataParticleType.GLIDER_ENG_METADATA, 1)
result1 = self.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED, 1)
result2 = self.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 1)
self.assert_stop_sampling()
# append result2 (the eng sci particle) to result 1 (the eng particle)
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_shutdown_restart(): got result 1 %s", result0)
self.assert_data_values(result0, 'single_glider_record-engDataOnly-StartStopQual.mrg.result.yml')
###self.assert_all_queue_size_zero()
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.data_subscribers.clear_sample_queue(DataParticleType.GLIDER_ENG_METADATA)
self.data_subscribers.clear_sample_queue(DataParticleType.GLIDER_ENG_TELEMETERED)
self.data_subscribers.clear_sample_queue(DataParticleType.GLIDER_ENG_SCI_TELEMETERED)
# Restart sampling and ensure we get the last 3 records of the file
self.assert_start_sampling()
result1 = self.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED, 3)
result2 = self.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 3)
self.assert_stop_sampling()
# append result2 (the eng sci particle) to result 1 (the eng particle)
result1.extend(result2)
log.debug("##")
log.debug("##")
log.debug("## SHOULD BE ROWS 3 - 4 ")
log.debug("##")
log.debug("##")
log.debug("## QualificationTest.test_shutdown_restart_telemetered(): got remaining combined result %s", result1)
log.debug("##")
log.debug("##")
self.assert_data_values(result1, 'shutdownrestart_glider_record-engDataOnly.mrg.result.yml')
except SampleTimeout as e:
log.error("## QualificationTest.test_shutdown_restart(): Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
##DONE
def test_shutdown_restart_recovered(self):
"""
Test the agent's ability to completely stop, then restart
at the correct spot.
"""
log.info("test_shutdown_restart_recovered(): CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('single_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-single_glider_record-engDataOnly.mrg")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
result0 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_METADATA)
result1 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED)
result2 = self.data_subscribers.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED)
# append result2 (the eng sci particle) to result 1 (the eng particle)
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_shutdown_restart_recovered(): RESULT: %s", result0)
# Verify values
self.assert_data_values(result0, 'single_glider_record-engDataOnly.mrg.result.yml')
###self.assert_all_queue_size_zero()
self.create_sample_data_set_dir('multiple_glider_record-engDataOnly.mrg',
TELEMETERED_TEST_DIR,
"CopyOf-multiple_glider_record-engDataOnly.mrg")
# Now read the first three records of the second file then stop
result0 = self.get_samples(DataParticleType.GLIDER_ENG_METADATA, 1)
result1 = self.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED, 1)
result2 = self.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 1)
self.assert_stop_sampling()
# append result2 (the eng sci particle) to result 1 (the eng particle)
result0.extend(result1)
result0.extend(result2)
log.debug("## QualificationTest.test_shutdown_restart_recovered(): got result 1 %s", result0)
self.assert_data_values(result0, 'single_glider_record-engDataOnly-StartStopQual.mrg.result.yml')
###self.assert_all_queue_size_zero()
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
self.data_subscribers.clear_sample_queue(DataParticleType.GLIDER_ENG_METADATA)
self.data_subscribers.clear_sample_queue(DataParticleType.GLIDER_ENG_TELEMETERED)
self.data_subscribers.clear_sample_queue(DataParticleType.GLIDER_ENG_SCI_TELEMETERED)
# Restart sampling and ensure we get the last 3 records of the file
self.assert_start_sampling()
result1 = self.get_samples(DataParticleType.GLIDER_ENG_TELEMETERED, 3)
result2 = self.get_samples(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 3)
# append result2 (the eng sci particle) to result 1 (the eng particle)
result1.extend(result2)
log.debug("##")
log.debug("##")
log.debug("## SHOULD BE ROWS 3 - 4 ")
log.debug("##")
log.debug("##")
log.debug("## QualificationTest.test_shutdown_restart_recovered(): got remaining combined result %s", result1)
log.debug("##")
log.debug("##")
self.assert_data_values(result1, 'shutdownrestart_glider_record-engDataOnly.mrg.result.yml')
except SampleTimeout as e:
log.error("## QualificationTest.test_shutdown_restart_recovered(): Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
##DONE
def test_parser_exception(self):
"""
Test an exception raised after the driver is started during
record parsing.
"""
self.clear_sample_data()
self.create_sample_data_set_dir('non-input_file.mrg',
TELEMETERED_TEST_DIR,
'unit_247_2012_051_9_9.mrg')
self.assert_initialize()
self.event_subscribers.clear_events()
self.assert_all_queue_size_zero()
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 40)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
# cause the same error for recovered
self.event_subscribers.clear_events()
self.clear_sample_data()
self.create_sample_data_set_dir('non-input_file.mrg',
RECOVERED_TEST_DIR,
"unit_363_2013_245_7_8.mrg")
self.assert_all_queue_size_zero_recovered()
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 40)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
def assert_all_queue_size_zero(self):
"""
make sure all 3 queues have no samples and are size 0
"""
self.assert_sample_queue_size(DataParticleType.GLIDER_ENG_METADATA, 0)
self.assert_sample_queue_size(DataParticleType.GLIDER_ENG_SCI_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.GLIDER_ENG_TELEMETERED, 0)
def assert_all_queue_size_zero_recovered(self):
"""
make sure all 3 queues have no samples and are size 0
"""
self.assert_sample_queue_size(DataParticleType.GLIDER_ENG_METADATA_RECOVERED, 0)
self.assert_sample_queue_size(DataParticleType.GLIDER_ENG_SCI_RECOVERED, 0)
self.assert_sample_queue_size(DataParticleType.GLIDER_ENG_RECOVERED, 0)
|
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_stp
version_added: "2.4"
short_description: Manages STP configuration on HUAWEI CloudEngine switches.
description:
- Manages STP configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present', 'absent']
stp_mode:
description:
- Set an operation mode for the current MSTP process.
The mode can be STP, RSTP, or MSTP.
required: false
default: null
choices: ['stp', 'rstp', 'mstp']
stp_enable:
description:
- Enable or disable STP on a switch.
required: false
default: null
choices: ['enable', 'disable']
stp_converge:
description:
- STP convergence mode.
Fast means set STP aging mode to Fast.
Normal means set STP aging mode to Normal.
required: false
default: null
choices: ['fast', 'normal']
bpdu_protection:
description:
- Configure BPDU protection on an edge port.
This function prevents network flapping caused by attack packets.
required: false
default: null
choices: ['enable', 'disable']
tc_protection:
description:
- Configure the TC BPDU protection function for an MSTP process.
required: false
default: null
choices: ['enable', 'disable']
tc_protection_interval:
description:
- Set the time the MSTP device takes to handle the maximum number of TC BPDUs
and immediately refresh forwarding entries.
The value is an integer ranging from 1 to 600, in seconds.
required: false
default: null
tc_protection_threshold:
description:
- Set the maximum number of TC BPDUs that the MSTP can handle.
The value is an integer ranging from 1 to 255. The default value is 1 on the switch.
required: false
default: null
interface:
description:
- Interface name.
If the value is C(all), the configuration will be applied to all interfaces.
If the value is a specific interface name, the full name must be entered.
required: false
default: null
edged_port:
description:
- Set the current port as an edge port.
required: false
default: null
choices: ['enable', 'disable']
bpdu_filter:
description:
- Specify a port as a BPDU filter port.
required: false
default: null
choices: ['enable', 'disable']
cost:
description:
- Set the path cost of the current port.
The default instance is 0.
required: false
default: null
root_protection:
description:
- Enable root protection on the current port.
required: false
default: null
choices: ['enable', 'disable']
loop_protection:
description:
- Enable loop protection on the current port.
required: false
default: null
choices: ['enable', 'disable']
'''
EXAMPLES = '''
- name: CloudEngine stp test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config stp mode"
ce_stp:
state: present
stp_mode: stp
provider: "{{ cli }}"
- name: "Undo stp mode"
ce_stp:
state: absent
stp_mode: stp
provider: "{{ cli }}"
- name: "Enable bpdu protection"
ce_stp:
state: present
bpdu_protection: enable
provider: "{{ cli }}"
- name: "Disable bpdu protection"
ce_stp:
state: present
bpdu_protection: disable
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"bpdu_protection": "enable",
"state": "present"}
existing:
description: k/v pairs of existing stp configuration
returned: always
type: dict
sample: {"bpdu_protection": "disable"}
end_state:
description: k/v pairs of stp params after module execution
returned: always
type: dict
sample: {"bpdu_protection": "enable"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["stp bpdu-protection"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec
class Stp(object):
""" Manages stp/rstp/mstp configuration """
def __init__(self, **kwargs):
""" Stp module init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# config
self.cur_cfg = dict()
self.stp_cfg = None
self.interface_stp_cfg = None
# module args
self.state = self.module.params['state'] or None
self.stp_mode = self.module.params['stp_mode'] or None
self.stp_enable = self.module.params['stp_enable'] or None
self.stp_converge = self.module.params['stp_converge'] or None
self.interface = self.module.params['interface'] or None
self.edged_port = self.module.params['edged_port'] or None
self.bpdu_filter = self.module.params['bpdu_filter'] or None
self.cost = self.module.params['cost'] or None
self.bpdu_protection = self.module.params['bpdu_protection'] or None
self.tc_protection = self.module.params['tc_protection'] or None
self.tc_protection_interval = self.module.params['tc_protection_interval'] or None
self.tc_protection_threshold = self.module.params['tc_protection_threshold'] or None
self.root_protection = self.module.params['root_protection'] or None
self.loop_protection = self.module.params['loop_protection'] or None
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def cli_load_config(self, commands):
""" Cli load configuration """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_stp_config(self):
""" Cli get stp configuration """
regular = "| include stp"
flags = list()
flags.append(regular)
self.stp_cfg = get_config(self.module, flags)
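# Illustrative sketch: the filtered running-config returned here is expected to
# contain plain-text lines such as 'stp mode rstp' or 'stp bpdu-protection',
# which the getters below match with simple substring checks.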
def cli_get_interface_stp_config(self):
""" Cli get interface's stp configuration """
if self.interface:
regular = "| ignore-case section include ^interface %s$" % self.interface
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
if not tmp_cfg:
self.module.fail_json(
msg='Error: The interface %s does not exist.' % self.interface)
if "undo portswitch" in tmp_cfg:
self.module.fail_json(
msg='Error: The interface %s is not in switch mode.' % self.interface)
self.interface_stp_cfg = tmp_cfg
def check_params(self):
""" Check module params """
if self.cost:
if self.cost.isdigit():
if int(self.cost) < 1 or int(self.cost) > 200000000:
self.module.fail_json(
msg='Error: The value of cost is out of [1 - 200000000].')
else:
self.module.fail_json(
msg='Error: The cost is not a digit.')
if self.tc_protection_interval:
if self.tc_protection_interval.isdigit():
if int(self.tc_protection_interval) < 1 or int(self.tc_protection_interval) > 600:
self.module.fail_json(
msg='Error: The value of tc_protection_interval is out of [1 - 600].')
else:
self.module.fail_json(
msg='Error: The tc_protection_interval is not a digit.')
if self.tc_protection_threshold:
if self.tc_protection_threshold.isdigit():
if int(self.tc_protection_threshold) < 1 or int(self.tc_protection_threshold) > 255:
self.module.fail_json(
msg='Error: The value of tc_protection_threshold is out of [1 - 255].')
else:
self.module.fail_json(
msg='Error: The tc_protection_threshold is not a digit.')
if self.root_protection or self.loop_protection or self.cost:
if not self.interface:
self.module.fail_json(
msg='Error: Please input an interface.')
elif self.interface == "all":
self.module.fail_json(
msg='Error: Interface can not be "all" when configuring root_protection, loop_protection or cost.')
if self.root_protection and self.root_protection == "enable":
if self.loop_protection and self.loop_protection == "enable":
self.module.fail_json(
msg='Error: Can not enable root_protection and loop_protection on the same interface.')
if self.edged_port or self.bpdu_filter:
if not self.interface:
self.module.fail_json(
msg='Error: Please input an interface.')
def get_proposed(self):
""" Get module proposed """
self.proposed["state"] = self.state
if self.stp_mode:
self.proposed["stp_mode"] = self.stp_mode
if self.stp_enable:
self.proposed["stp_enable"] = self.stp_enable
if self.stp_converge:
self.proposed["stp_converge"] = self.stp_converge
if self.interface:
self.proposed["interface"] = self.interface
if self.edged_port:
self.proposed["edged_port"] = self.edged_port
if self.bpdu_filter:
self.proposed["bpdu_filter"] = self.bpdu_filter
if self.cost:
self.proposed["cost"] = self.cost
if self.bpdu_protection:
self.proposed["bpdu_protection"] = self.bpdu_protection
if self.tc_protection:
self.proposed["tc_protection"] = self.tc_protection
if self.tc_protection_interval:
self.proposed["tc_protection_interval"] = self.tc_protection_interval
if self.tc_protection_threshold:
self.proposed["tc_protection_threshold"] = self.tc_protection_threshold
if self.root_protection:
self.proposed["root_protection"] = self.root_protection
if self.loop_protection:
self.proposed["loop_protection"] = self.loop_protection
def get_existing(self):
""" Get existing configuration """
self.cli_get_stp_config()
if self.interface and self.interface != "all":
self.cli_get_interface_stp_config()
if self.stp_mode:
if "stp mode stp" in self.stp_cfg:
self.cur_cfg["stp_mode"] = "stp"
self.existing["stp_mode"] = "stp"
elif "stp mode rstp" in self.stp_cfg:
self.cur_cfg["stp_mode"] = "rstp"
self.existing["stp_mode"] = "rstp"
else:
self.cur_cfg["stp_mode"] = "mstp"
self.existing["stp_mode"] = "mstp"
if self.stp_enable:
if "stp disable" in self.stp_cfg:
self.cur_cfg["stp_enable"] = "disable"
self.existing["stp_enable"] = "disable"
else:
self.cur_cfg["stp_enable"] = "enable"
self.existing["stp_enable"] = "enable"
if self.stp_converge:
if "stp converge fast" in self.stp_cfg:
self.cur_cfg["stp_converge"] = "fast"
self.existing["stp_converge"] = "fast"
else:
self.cur_cfg["stp_converge"] = "normal"
self.existing["stp_converge"] = "normal"
if self.edged_port:
if self.interface == "all":
if "stp edged-port default" in self.stp_cfg:
self.cur_cfg["edged_port"] = "enable"
self.existing["edged_port"] = "enable"
else:
self.cur_cfg["edged_port"] = "disable"
self.existing["edged_port"] = "disable"
else:
if "stp edged-port enable" in self.interface_stp_cfg:
self.cur_cfg["edged_port"] = "enable"
self.existing["edged_port"] = "enable"
else:
self.cur_cfg["edged_port"] = "disable"
self.existing["edged_port"] = "disable"
if self.bpdu_filter:
if self.interface == "all":
if "stp bpdu-filter default" in self.stp_cfg:
self.cur_cfg["bpdu_filter"] = "enable"
self.existing["bpdu_filter"] = "enable"
else:
self.cur_cfg["bpdu_filter"] = "disable"
self.existing["bpdu_filter"] = "disable"
else:
if "stp bpdu-filter enable" in self.interface_stp_cfg:
self.cur_cfg["bpdu_filter"] = "enable"
self.existing["bpdu_filter"] = "enable"
else:
self.cur_cfg["bpdu_filter"] = "disable"
self.existing["bpdu_filter"] = "disable"
if self.bpdu_protection:
if "stp bpdu-protection" in self.stp_cfg:
self.cur_cfg["bpdu_protection"] = "enable"
self.existing["bpdu_protection"] = "enable"
else:
self.cur_cfg["bpdu_protection"] = "disable"
self.existing["bpdu_protection"] = "disable"
if self.tc_protection:
if "stp tc-protection" in self.stp_cfg:
self.cur_cfg["tc_protection"] = "enable"
self.existing["tc_protection"] = "enable"
else:
self.cur_cfg["tc_protection"] = "disable"
self.existing["tc_protection"] = "disable"
if self.tc_protection_interval:
if "stp tc-protection interval" in self.stp_cfg:
tmp_value = re.findall(r'stp tc-protection interval (.*)', self.stp_cfg)
if not tmp_value:
self.module.fail_json(
msg='Error: Can not find tc-protection interval on the device.')
self.cur_cfg["tc_protection_interval"] = tmp_value[0]
self.existing["tc_protection_interval"] = tmp_value[0]
else:
self.cur_cfg["tc_protection_interval"] = "null"
self.existing["tc_protection_interval"] = "null"
if self.tc_protection_threshold:
if "stp tc-protection threshold" in self.stp_cfg:
tmp_value = re.findall(r'stp tc-protection threshold (.*)', self.stp_cfg)
if not tmp_value:
self.module.fail_json(
msg='Error: Can not find tc-protection threshold on the device.')
self.cur_cfg["tc_protection_threshold"] = tmp_value[0]
self.existing["tc_protection_threshold"] = tmp_value[0]
else:
self.cur_cfg["tc_protection_threshold"] = "1"
self.existing["tc_protection_threshold"] = "1"
if self.cost:
tmp_value = re.findall(r'stp instance (.*) cost (.*)', self.interface_stp_cfg)
if not tmp_value:
self.cur_cfg["cost"] = "null"
self.existing["cost"] = "null"
else:
self.cur_cfg["cost"] = tmp_value[0][1]
self.existing["cost"] = tmp_value[0][1]
# root_protection and loop_protection are read from the interface configuration together
if self.root_protection or self.loop_protection:
if "stp root-protection" in self.interface_stp_cfg:
self.cur_cfg["root_protection"] = "enable"
self.existing["root_protection"] = "enable"
else:
self.cur_cfg["root_protection"] = "disable"
self.existing["root_protection"] = "disable"
if "stp loop-protection" in self.interface_stp_cfg:
self.cur_cfg["loop_protection"] = "enable"
self.existing["loop_protection"] = "enable"
else:
self.cur_cfg["loop_protection"] = "disable"
self.existing["loop_protection"] = "disable"
def get_end_state(self):
""" Get end state """
self.cli_get_stp_config()
if self.interface and self.interface != "all":
self.cli_get_interface_stp_config()
if self.stp_mode:
if "stp mode stp" in self.stp_cfg:
self.end_state["stp_mode"] = "stp"
elif "stp mode rstp" in self.stp_cfg:
self.end_state["stp_mode"] = "rstp"
else:
self.end_state["stp_mode"] = "mstp"
if self.stp_enable:
if "stp disable" in self.stp_cfg:
self.end_state["stp_enable"] = "disable"
else:
self.end_state["stp_enable"] = "enable"
if self.stp_converge:
if "stp converge fast" in self.stp_cfg:
self.end_state["stp_converge"] = "fast"
else:
self.end_state["stp_converge"] = "normal"
if self.edged_port:
if self.interface == "all":
if "stp edged-port default" in self.stp_cfg:
self.end_state["edged_port"] = "enable"
else:
self.end_state["edged_port"] = "disable"
else:
if "stp edged-port enable" in self.interface_stp_cfg:
self.end_state["edged_port"] = "enable"
else:
self.end_state["edged_port"] = "disable"
if self.bpdu_filter:
if self.interface == "all":
if "stp bpdu-filter default" in self.stp_cfg:
self.end_state["bpdu_filter"] = "enable"
else:
self.end_state["bpdu_filter"] = "disable"
else:
if "stp bpdu-filter enable" in self.interface_stp_cfg:
self.end_state["bpdu_filter"] = "enable"
else:
self.end_state["bpdu_filter"] = "disable"
if self.bpdu_protection:
if "stp bpdu-protection" in self.stp_cfg:
self.end_state["bpdu_protection"] = "enable"
else:
self.end_state["bpdu_protection"] = "disable"
if self.tc_protection:
if "stp tc-protection" in self.stp_cfg:
self.end_state["tc_protection"] = "enable"
else:
self.end_state["tc_protection"] = "disable"
if self.tc_protection_interval:
if "stp tc-protection interval" in self.stp_cfg:
tmp_value = re.findall(r'stp tc-protection interval (.*)', self.stp_cfg)
if not tmp_value:
self.module.fail_json(
msg='Error: Can not find tc-protection interval on the device.')
self.end_state["tc_protection_interval"] = tmp_value[0]
else:
self.end_state["tc_protection_interval"] = "null"
if self.tc_protection_threshold:
if "stp tc-protection threshold" in self.stp_cfg:
tmp_value = re.findall(r'stp tc-protection threshold (.*)', self.stp_cfg)
if not tmp_value:
self.module.fail_json(
msg='Error: Can not find tc-protection threshold on the device.')
self.end_state["tc_protection_threshold"] = tmp_value[0]
else:
self.end_state["tc_protection_threshold"] = "1"
if self.cost:
tmp_value = re.findall(r'stp instance (.*) cost (.*)', self.interface_stp_cfg)
if not tmp_value:
self.end_state["cost"] = "null"
else:
self.end_state["cost"] = tmp_value[0][1]
if self.root_protection:
if "stp root-protection" in self.interface_stp_cfg:
self.end_state["root_protection"] = "enable"
else:
self.end_state["root_protection"] = "disable"
if self.loop_protection:
if "stp loop-protection" in self.interface_stp_cfg:
self.end_state["loop_protection"] = "enable"
else:
self.end_state["loop_protection"] = "disable"
def present_stp(self):
""" Present stp configuration """
cmds = list()
# config global stp
if self.stp_mode:
if self.stp_mode != self.cur_cfg["stp_mode"]:
cmd = "stp mode %s" % self.stp_mode
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.stp_enable:
if self.stp_enable != self.cur_cfg["stp_enable"]:
cmd = "stp %s" % self.stp_enable
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.stp_converge:
if self.stp_converge != self.cur_cfg["stp_converge"]:
cmd = "stp converge %s" % self.stp_converge
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.edged_port:
if self.interface == "all":
if self.edged_port != self.cur_cfg["edged_port"]:
if self.edged_port == "enable":
cmd = "stp edged-port default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp edged-port default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.bpdu_filter:
if self.interface == "all":
if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
if self.bpdu_filter == "enable":
cmd = "stp bpdu-filter default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp bpdu-filter default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.bpdu_protection:
if self.bpdu_protection != self.cur_cfg["bpdu_protection"]:
if self.bpdu_protection == "enable":
cmd = "stp bpdu-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp bpdu-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection:
if self.tc_protection != self.cur_cfg["tc_protection"]:
if self.tc_protection == "enable":
cmd = "stp tc-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp tc-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection_interval:
if self.tc_protection_interval != self.cur_cfg["tc_protection_interval"]:
cmd = "stp tc-protection interval %s" % self.tc_protection_interval
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection_threshold:
if self.tc_protection_threshold != self.cur_cfg["tc_protection_threshold"]:
cmd = "stp tc-protection threshold %s" % self.tc_protection_threshold
cmds.append(cmd)
self.updates_cmd.append(cmd)
# config interface stp
if self.interface and self.interface != "all":
tmp_changed = False
cmd = "interface %s" % self.interface
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.edged_port:
if self.edged_port != self.cur_cfg["edged_port"]:
if self.edged_port == "enable":
cmd = "stp edged-port enable"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp edged-port"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.bpdu_filter:
if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
if self.bpdu_filter == "enable":
cmd = "stp bpdu-filter enable"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp bpdu-filter"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.root_protection:
if self.root_protection == "enable" and self.cur_cfg["loop_protection"] == "enable":
self.module.fail_json(
msg='Error: The interface has loop_protection enabled, so root_protection can not be enabled.')
if self.root_protection != self.cur_cfg["root_protection"]:
if self.root_protection == "enable":
cmd = "stp root-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp root-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.loop_protection:
if self.loop_protection == "enable" and self.cur_cfg["root_protection"] == "enable":
self.module.fail_json(
msg='Error: The interface has root_protection enabled, so loop_protection can not be enabled.')
if self.loop_protection != self.cur_cfg["loop_protection"]:
if self.loop_protection == "enable":
cmd = "stp loop-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp loop-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.cost:
if self.cost != self.cur_cfg["cost"]:
cmd = "stp cost %s" % self.cost
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if not tmp_changed:
cmd = "interface %s" % self.interface
self.updates_cmd.remove(cmd)
cmds.remove(cmd)
if cmds:
self.cli_load_config(cmds)
self.changed = True
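# Illustrative examples (hypothetical interface name) of the updates built above:
#   ["stp mode rstp"]                                -- global change only
#   ["interface 10GE1/0/1", "stp edged-port enable"] -- per-interface change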
def absent_stp(self):
""" Absent stp configuration """
cmds = list()
if self.stp_mode:
if self.stp_mode == self.cur_cfg["stp_mode"]:
if self.stp_mode != "mstp":
cmd = "undo stp mode"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
if self.stp_enable:
if self.stp_enable != self.cur_cfg["stp_enable"]:
cmd = "stp %s" % self.stp_enable
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.stp_converge:
if self.stp_converge == self.cur_cfg["stp_converge"]:
cmd = "undo stp converge"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
if self.edged_port:
if self.interface == "all":
if self.edged_port != self.cur_cfg["edged_port"]:
if self.edged_port == "enable":
cmd = "stp edged-port default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp edged-port default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.bpdu_filter:
if self.interface == "all":
if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
if self.bpdu_filter == "enable":
cmd = "stp bpdu-filter default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp bpdu-filter default"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.bpdu_protection:
if self.bpdu_protection != self.cur_cfg["bpdu_protection"]:
if self.bpdu_protection == "enable":
cmd = "stp bpdu-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp bpdu-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection:
if self.tc_protection != self.cur_cfg["tc_protection"]:
if self.tc_protection == "enable":
cmd = "stp tc-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
else:
cmd = "undo stp tc-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.tc_protection_interval:
if self.tc_protection_interval == self.cur_cfg["tc_protection_interval"]:
cmd = "undo stp tc-protection interval"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
if self.tc_protection_threshold:
if self.tc_protection_threshold == self.cur_cfg["tc_protection_threshold"]:
if self.tc_protection_threshold != "1":
cmd = "undo stp tc-protection threshold"
cmds.append(cmd)
self.updates_cmd.append(cmd)
self.changed = True
# undo interface stp
if self.interface and self.interface != "all":
tmp_changed = False
cmd = "interface %s" % self.interface
cmds.append(cmd)
self.updates_cmd.append(cmd)
if self.edged_port:
if self.edged_port != self.cur_cfg["edged_port"]:
if self.edged_port == "enable":
cmd = "stp edged-port enable"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp edged-port"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.bpdu_filter:
if self.bpdu_filter != self.cur_cfg["bpdu_filter"]:
if self.bpdu_filter == "enable":
cmd = "stp bpdu-filter enable"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp bpdu-filter"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.root_protection:
if self.root_protection == "enable" and self.cur_cfg["loop_protection"] == "enable":
self.module.fail_json(
msg='Error: The interface has loop_protection enabled, so root_protection can not be enabled.')
if self.root_protection != self.cur_cfg["root_protection"]:
if self.root_protection == "enable":
cmd = "stp root-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp root-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.loop_protection:
if self.loop_protection == "enable" and self.cur_cfg["root_protection"] == "enable":
self.module.fail_json(
msg='Error: The interface has root_protection enabled, so loop_protection can not be enabled.')
if self.loop_protection != self.cur_cfg["loop_protection"]:
if self.loop_protection == "enable":
cmd = "stp loop-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
else:
cmd = "undo stp loop-protection"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if self.cost:
if self.cost == self.cur_cfg["cost"]:
cmd = "undo stp cost"
cmds.append(cmd)
self.updates_cmd.append(cmd)
tmp_changed = True
if not tmp_changed:
cmd = "interface %s" % self.interface
self.updates_cmd.remove(cmd)
cmds.remove(cmd)
if cmds:
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Work function """
self.check_params()
self.get_proposed()
self.get_existing()
if self.state == "present":
self.present_stp()
else:
self.absent_stp()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
stp_mode=dict(choices=['stp', 'rstp', 'mstp']),
stp_enable=dict(choices=['enable', 'disable']),
stp_converge=dict(choices=['fast', 'normal']),
bpdu_protection=dict(choices=['enable', 'disable']),
tc_protection=dict(choices=['enable', 'disable']),
tc_protection_interval=dict(type='str'),
tc_protection_threshold=dict(type='str'),
interface=dict(type='str'),
edged_port=dict(choices=['enable', 'disable']),
bpdu_filter=dict(choices=['enable', 'disable']),
cost=dict(type='str'),
root_protection=dict(choices=['enable', 'disable']),
loop_protection=dict(choices=['enable', 'disable'])
)
argument_spec.update(ce_argument_spec)
module = Stp(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from django.conf import settings
from django.forms import ValidationError # noqa
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from horizon.utils import validators
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
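# Primary project/role assignment is only mandatory with the Keystone v2.0 API,
# so the related form fields are required only when the active version is < 3.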
PROJECT_REQUIRED = api.keystone.VERSIONS.active < 3
class PasswordMixin(forms.SelfHandlingForm):
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False))
no_autocomplete = True
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'password' in data and 'confirm_password' in data:
if data['password'] != data['confirm_password']:
raise ValidationError(_('Passwords do not match.'))
return data
class BaseUserForm(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(BaseUserForm, self).__init__(request, *args, **kwargs)
# Populate project choices
project_choices = []
# If the user is already set (update action), list only projects which
# the user has access to.
user_id = kwargs['initial'].get('id', None)
domain_id = kwargs['initial'].get('domain_id', None)
projects, has_more = api.keystone.tenant_list(request,
domain=domain_id,
user=user_id)
for project in projects:
if project.enabled:
project_choices.append((project.id, project.name))
if not project_choices:
project_choices.insert(0, ('', _("No available projects")))
elif len(project_choices) > 1:
project_choices.insert(0, ('', _("Select a project")))
self.fields['project'].choices = project_choices
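# Illustrative result: project_choices resembles
# [('', 'Select a project'), ('<project_id>', 'demo'), ...] (hypothetical values).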
ADD_PROJECT_URL = "horizon:identity:projects:create"
class CreateUserForm(PasswordMixin, BaseUserForm):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("User Name"))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
email = forms.EmailField(
label=_("Email"),
required=False)
project = forms.DynamicChoiceField(label=_("Primary Project"),
required=PROJECT_REQUIRED,
add_item_link=ADD_PROJECT_URL)
role_id = forms.ChoiceField(label=_("Role"),
required=PROJECT_REQUIRED)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, *args, **kwargs):
roles = kwargs.pop('roles')
super(CreateUserForm, self).__init__(*args, **kwargs)
# Reorder form fields from multiple inheritance
ordering = ["domain_id", "domain_name", "name",
"description", "email", "password",
"confirm_password", "project", "role_id",
"enabled"]
self.fields = collections.OrderedDict(
(key, self.fields[key]) for key in ordering)
role_choices = [(role.id, role.name) for role in roles]
self.fields['role_id'].choices = role_choices
# For keystone V3, display the two domain fields as read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# For keystone V2.0, hide description field
else:
self.fields["description"].widget = forms.HiddenInput()
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data')
def handle(self, request, data):
domain = api.keystone.get_default_domain(self.request)
try:
LOG.info('Creating user with name "%s"' % data['name'])
desc = data["description"]
if "email" in data:
data['email'] = data['email'] or None
new_user = \
api.keystone.user_create(request,
name=data['name'],
email=data['email'],
description=desc or None,
password=data['password'],
project=data['project'] or None,
enabled=data['enabled'],
domain=domain.id)
messages.success(request,
_('User "%s" was successfully created.')
% data['name'])
if data['project'] and data['role_id']:
roles = api.keystone.roles_for_user(request,
new_user.id,
data['project']) or []
assigned = [role for role in roles if role.id == str(
data['role_id'])]
if not assigned:
try:
api.keystone.add_tenant_user_role(request,
data['project'],
new_user.id,
data['role_id'])
except Exception:
exceptions.handle(request,
_('Unable to add user '
'to primary project.'))
return new_user
except exceptions.Conflict:
msg = _('User name "%s" is already used.') % data['name']
messages.error(request, msg)
except Exception:
exceptions.handle(request, _('Unable to create user.'))
class UpdateUserForm(BaseUserForm):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
name = forms.CharField(max_length=255, label=_("User Name"))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
email = forms.EmailField(
label=_("Email"),
required=False)
project = forms.ChoiceField(label=_("Primary Project"),
required=PROJECT_REQUIRED)
def __init__(self, request, *args, **kwargs):
super(UpdateUserForm, self).__init__(request, *args, **kwargs)
if api.keystone.keystone_can_edit_user() is False:
for field in ('name', 'email'):
self.fields.pop(field)
# For keystone V3, display the two domain fields as read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# For keystone V2.0, hide description field
else:
self.fields["description"].widget = forms.HiddenInput()
def handle(self, request, data):
user = data.pop('id')
data.pop('domain_id')
data.pop('domain_name')
if 'description' not in self.changed_data:
data.pop('description')
try:
if "email" in data:
data['email'] = data['email'] or None
response = api.keystone.user_update(request, user, **data)
messages.success(request,
_('User has been updated successfully.'))
except exceptions.Conflict:
msg = _('User name "%s" is already used.') % data['name']
messages.error(request, msg)
return False
except Exception:
response = exceptions.handle(request, ignore=True)
messages.error(request, _('Unable to update the user.'))
if isinstance(response, http.HttpResponse):
return response
else:
return True
class ChangePasswordForm(PasswordMixin, forms.SelfHandlingForm):
id = forms.CharField(widget=forms.HiddenInput)
name = forms.CharField(
label=_("User Name"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
required=False)
def __init__(self, request, *args, **kwargs):
super(ChangePasswordForm, self).__init__(request, *args, **kwargs)
if getattr(settings, 'ENFORCE_PASSWORD_CHECK', False):
self.fields["admin_password"] = forms.CharField(
label=_("Admin Password"),
widget=forms.PasswordInput(render_value=False))
# Reorder form fields from multiple inheritance
self.fields.keyOrder = ["id", "name", "admin_password",
"password", "confirm_password"]
@sensitive_variables('data', 'password', 'admin_password')
def handle(self, request, data):
user_id = data.pop('id')
password = data.pop('password')
admin_password = None
# Throw away the password confirmation, we're done with it.
data.pop('confirm_password', None)
# Verify admin password before changing user password
if getattr(settings, 'ENFORCE_PASSWORD_CHECK', False):
admin_password = data.pop('admin_password')
if not api.keystone.user_verify_admin_password(request,
admin_password):
self.api_error(_('The admin password is incorrect.'))
return False
try:
response = api.keystone.user_update_password(
request, user_id, password)
if user_id == request.user.id:
return utils.logout_with_message(
request,
_('Password changed. Please log in to continue.'),
redirect=False)
messages.success(request,
_('User password has been updated successfully.'))
except Exception:
response = exceptions.handle(request, ignore=True)
messages.error(request, _('Unable to update the user password.'))
if isinstance(response, http.HttpResponse):
return response
else:
return True
|
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to collect and publish performance samples to various sinks."""
import abc
import io
import itertools
import json
import logging
import operator
import sys
import time
import uuid
from perfkitbenchmarker import disk
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
flags.DEFINE_string(
'product_name',
'PerfKitBenchmarker',
'The product name to use when publishing results.')
flags.DEFINE_boolean(
'official',
False,
'A boolean indicating whether results are official or not. The '
'default is False. Official test results are treated and queried '
'differently from non-official test results.')
flags.DEFINE_string(
'json_path',
None,
'A path to write newline-delimited JSON results. '
'Default: write to a run-specific temporary directory.')
flags.DEFINE_boolean(
'collapse_labels',
True,
'Collapse entries in labels.')
flags.DEFINE_string(
'bigquery_table',
None,
'The BigQuery table to publish results to. This should be of the form '
'"[project_id:]dataset_name.table_name".')
flags.DEFINE_string(
'bq_path', 'bq', 'Path to the "bq" executable.')
flags.DEFINE_string(
'bq_project', None, 'Project to use for authenticating with BigQuery.')
flags.DEFINE_string(
'service_account', None, 'Service account to use to authenticate with BQ.')
flags.DEFINE_string(
'service_account_private_key', None,
'Service private key for authenticating with BQ.')
flags.DEFINE_string(
'gsutil_path', 'gsutil', 'Path to the "gsutil" executable.')
flags.DEFINE_string(
'cloud_storage_bucket',
None,
'GCS bucket to upload records to. Bucket must exist.')
flags.DEFINE_list(
'metadata',
[],
'A list of key-value pairs that will be added to the labels field of all '
'samples as metadata. Each key-value pair in the list should be colon '
'separated.')
DEFAULT_JSON_OUTPUT_NAME = 'perfkitbenchmarker_results.json'
DEFAULT_CREDENTIALS_JSON = 'credentials.json'
GCS_OBJECT_NAME_LENGTH = 20
def GetLabelsFromDict(metadata):
"""Converts a metadata dictionary to a string of labels.
Args:
metadata: a dictionary of string key value pairs.
Returns:
A string of labels in the format that Perfkit uses.
"""
labels = []
for k, v in metadata.iteritems():
labels.append('|%s:%s|' % (k, v))
return ','.join(labels)
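# Minimal usage sketch (illustrative values):
#   GetLabelsFromDict({'cloud': 'GCP', 'num_cpus': 4})
#   -> '|cloud:GCP|,|num_cpus:4|'  (pair order follows dict iteration order)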
class MetadataProvider(object):
"""A provider of sample metadata."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def AddMetadata(self, metadata, benchmark_spec):
"""Add metadata to a dictionary.
Existing values will be overwritten.
Args:
metadata: dict. Dictionary of metadata to update.
benchmark_spec: BenchmarkSpec. The benchmark specification.
Returns:
Updated 'metadata'.
"""
raise NotImplementedError()
class DefaultMetadataProvider(MetadataProvider):
"""Adds default metadata to samples."""
def AddMetadata(self, metadata, benchmark_spec):
metadata = metadata.copy()
metadata['perfkitbenchmarker_version'] = version.VERSION
for name, vms in benchmark_spec.vm_groups.iteritems():
if len(vms) == 0:
continue
# Get a representative VM so that we can publish the cloud, zone,
# machine type, and image.
vm = vms[-1]
name_prefix = '' if name == 'default' else name + '_'
metadata[name_prefix + 'cloud'] = vm.CLOUD
metadata[name_prefix + 'zone'] = vm.zone
metadata[name_prefix + 'machine_type'] = vm.machine_type
metadata[name_prefix + 'image'] = vm.image
if vm.scratch_disks:
data_disk = vm.scratch_disks[0]
metadata[name_prefix + 'scratch_disk_type'] = data_disk.disk_type
metadata[name_prefix + 'scratch_disk_size'] = data_disk.disk_size
metadata[name_prefix + 'num_striped_disks'] = (
data_disk.num_striped_disks)
if data_disk.disk_type == disk.PIOPS:
metadata[name_prefix + 'scratch_disk_iops'] = data_disk.iops
# User specified metadata
for pair in FLAGS.metadata:
try:
key, value = pair.split(':')
metadata[key] = value
except ValueError:
logging.error('Bad metadata flag format. Skipping "%s".', pair)
continue
return metadata
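# Illustrative flag usage (hypothetical values):
#   --metadata=purpose:nightly,owner:perf-team
# adds purpose='nightly' and owner='perf-team' to every sample's metadata.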
DEFAULT_METADATA_PROVIDERS = [DefaultMetadataProvider()]
class SamplePublisher(object):
"""An object that can publish performance samples."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def PublishSamples(self, samples):
"""Publishes 'samples'.
PublishSamples will be called exactly once. Calling
SamplePublisher.PublishSamples multiple times may result in data being
overwritten.
Args:
samples: list of dicts to publish.
"""
raise NotImplementedError()
class PrettyPrintStreamPublisher(SamplePublisher):
"""Writes samples to an output stream, defaulting to stdout.
Samples are pretty-printed and summarized. Example output (truncated):
-------------------------PerfKitBenchmarker Results Summary--------------
COREMARK:
num_cpus="4"
Coremark Score 44145.237832
End to End Runtime 289.477677 seconds
NETPERF:
client_machine_type="n1-standard-4" client_zone="us-central1-a" ....
TCP_RR_Transaction_Rate 1354.04 transactions_per_second (ip_type="ext ...
TCP_RR_Transaction_Rate 3972.70 transactions_per_second (ip_type="int ...
TCP_CRR_Transaction_Rate 449.69 transactions_per_second (ip_type="ext ...
TCP_CRR_Transaction_Rate 1271.68 transactions_per_second (ip_type="int ...
TCP_STREAM_Throughput 1171.04 Mbits/sec (ip_type="ext ...
TCP_STREAM_Throughput 6253.24 Mbits/sec (ip_type="int ...
UDP_RR_Transaction_Rate 1380.37 transactions_per_second (ip_type="ext ...
UDP_RR_Transaction_Rate 4336.37 transactions_per_second (ip_type="int ...
End to End Runtime 444.33 seconds
-------------------------
For all tests: cloud="GCP" image="ubuntu-14-04" machine_type="n1-standa ...
Attributes:
stream: File-like object. Output stream to print samples.
"""
def __init__(self, stream=None):
self.stream = stream or sys.stdout
def __repr__(self):
return '<{0} stream={1}>'.format(type(self).__name__, self.stream)
def _FindConstantMetadataKeys(self, samples):
"""Finds metadata keys which are constant across a collection of samples.
Args:
samples: List of dicts, as passed to SamplePublisher.PublishSamples.
Returns:
The set of metadata keys for which all samples in 'samples' have the same
value.
"""
unique_values = {}
for sample in samples:
for k, v in sample['metadata'].iteritems():
if len(unique_values.setdefault(k, set())) < 2:
unique_values[k].add(v)
# Find keys which are not present in all samples
for sample in samples:
for k in frozenset(unique_values) - frozenset(sample['metadata']):
unique_values[k].add(None)
return frozenset(k for k, v in unique_values.iteritems() if len(v) == 1)
def _FormatMetadata(self, metadata):
"""Format 'metadata' as space-delimited key="value" pairs."""
return ' '.join('{0}="{1}"'.format(k, v)
for k, v in sorted(metadata.iteritems()))
def PublishSamples(self, samples):
# result will store the formatted text, then be emitted to self.stream and
# logged.
result = io.BytesIO()
dashes = '-' * 25
result.write('\n' + dashes +
'PerfKitBenchmarker Results Summary' +
dashes + '\n')
if not samples:
logging.debug('Pretty-printing results to %s:\n%s', self.stream,
result.getvalue())
self.stream.write(result.getvalue())
return
key = operator.itemgetter('test')
samples = sorted(samples, key=key)
globally_constant_keys = self._FindConstantMetadataKeys(samples)
for benchmark, test_samples in itertools.groupby(samples, key):
test_samples = list(test_samples)
# Drop end-to-end runtime: it always has no metadata.
non_endtoend_samples = [i for i in test_samples
if i['metric'] != 'End to End Runtime']
locally_constant_keys = (
self._FindConstantMetadataKeys(non_endtoend_samples) -
globally_constant_keys)
all_constant_meta = globally_constant_keys.union(locally_constant_keys)
benchmark_meta = {k: v for k, v in test_samples[0]['metadata'].iteritems()
if k in locally_constant_keys}
result.write('{0}:\n'.format(benchmark.upper()))
if benchmark_meta:
result.write(' {0}\n'.format(
self._FormatMetadata(benchmark_meta)))
for sample in test_samples:
meta = {k: v for k, v in sample['metadata'].iteritems()
if k not in all_constant_meta}
result.write(' {0:<30s} {1:>15f} {2:<30s}'.format(
sample['metric'], sample['value'], sample['unit']))
if meta:
result.write(' ({0})'.format(self._FormatMetadata(meta)))
result.write('\n')
global_meta = {k: v for k, v in samples[0]['metadata'].iteritems()
if k in globally_constant_keys}
result.write('\n' + dashes + '\n')
result.write('For all tests: {0}\n'.format(
self._FormatMetadata(global_meta)))
value = result.getvalue()
logging.debug('Pretty-printing results to %s:\n%s', self.stream, value)
self.stream.write(value)
class LogPublisher(SamplePublisher):
"""Writes samples to a Python Logger.
Attributes:
level: Logging level. Defaults to logging.INFO.
logger: Logger to publish to. Defaults to the root logger.
"""
def __init__(self, level=logging.INFO, logger=None):
self.level = level
self.logger = logger or logging.getLogger()
def __repr__(self):
return '<{0} logger={1} level={2}>'.format(type(self).__name__, self.logger,
self.level)
def PublishSamples(self, samples):
data = [
'\n' + '-' * 25 + 'PerfKitBenchmarker Complete Results' + '-' * 25 +
'\n']
for sample in samples:
data.append('%s\n' % sample)
self.logger.log(self.level, ''.join(data))
# TODO: Extract a function to write delimited JSON to a stream.
class NewlineDelimitedJSONPublisher(SamplePublisher):
"""Publishes samples to a file as newline delimited JSON.
The resulting output file is compatible with 'bq load' using
format NEWLINE_DELIMITED_JSON.
If 'collapse_labels' is True, metadata is converted to a flat string with key
'labels' via GetLabelsFromDict.
Attributes:
file_path: string. Destination path to write samples.
mode: Open mode for 'file_path'. Set to 'a' to append.
collapse_labels: boolean. If true, collapse sample metadata.
"""
def __init__(self, file_path, mode='wb', collapse_labels=True):
self.file_path = file_path
self.mode = mode
self.collapse_labels = collapse_labels
def __repr__(self):
return '<{0} file_path="{1}" mode="{2}">'.format(
type(self).__name__, self.file_path, self.mode)
def PublishSamples(self, samples):
logging.info('Publishing %d samples to %s', len(samples),
self.file_path)
with open(self.file_path, self.mode) as fp:
for sample in samples:
sample = sample.copy()
if self.collapse_labels:
sample['labels'] = GetLabelsFromDict(sample.pop('metadata', {}))
fp.write(json.dumps(sample) + '\n')
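# Sketch of one emitted line when collapse_labels is True (field values are
# invented; real samples carry additional fields such as owner and run_uri):
#
#   {"test": "iperf", "metric": "Throughput", "value": 1973.0,
#    "unit": "Mbits/sec", "labels": "|ip_type:external|,|machine_type:n1-standard-4|"}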
class BigQueryPublisher(SamplePublisher):
"""Publishes samples to BigQuery.
Attributes:
bigquery_table: string. The bigquery table to publish to, of the form
'[project_name:]dataset_name.table_name'
project_id: string. Project to use for authenticating with BigQuery.
    bq_path: string. Path to the 'bq' executable.
    service_account: string. Use this service account email address for
      authorization. For example, [email protected]
    service_account_private_key_file: Filename that contains the service
      account private key. Must be specified if service_account is specified.
"""
def __init__(self, bigquery_table, project_id=None, bq_path='bq',
service_account=None, service_account_private_key_file=None):
self.bigquery_table = bigquery_table
self.project_id = project_id
self.bq_path = bq_path
self.service_account = service_account
self.service_account_private_key_file = service_account_private_key_file
self._credentials_file = vm_util.PrependTempDir(DEFAULT_CREDENTIALS_JSON)
if ((self.service_account is None) !=
(self.service_account_private_key_file is None)):
raise ValueError('service_account and service_account_private_key '
'must be specified together.')
def __repr__(self):
return '<{0} table="{1}">'.format(type(self).__name__, self.bigquery_table)
def PublishSamples(self, samples):
if not samples:
logging.warn('No samples: not publishing to BigQuery')
return
with vm_util.NamedTemporaryFile(prefix='perfkit-bq-pub',
dir=vm_util.GetTempDir(),
suffix='.json') as tf:
json_publisher = NewlineDelimitedJSONPublisher(tf.name,
collapse_labels=True)
json_publisher.PublishSamples(samples)
tf.close()
logging.info('Publishing %d samples to %s', len(samples),
self.bigquery_table)
load_cmd = [self.bq_path]
if self.project_id:
load_cmd.append('--project_id=' + self.project_id)
if self.service_account:
assert self.service_account_private_key_file is not None
load_cmd.extend(['--service_account=' + self.service_account,
'--service_account_credential_file=' +
self._credentials_file,
'--service_account_private_key_file=' +
self.service_account_private_key_file])
load_cmd.extend(['load',
'--source_format=NEWLINE_DELIMITED_JSON',
self.bigquery_table,
tf.name])
vm_util.IssueRetryableCommand(load_cmd)
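# Illustrative shape of the command assembled above for a run with only
# project_id set (the table and temp file names are made up):
#
#   bq --project_id=my-project load --source_format=NEWLINE_DELIMITED_JSON \
#       samples_mart.results /tmp/perfkit-bq-pubXYZ.json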
class CloudStoragePublisher(SamplePublisher):
"""Publishes samples to a Google Cloud Storage bucket using gsutil.
  Samples are formatted using a NewlineDelimitedJSONPublisher, and written to
  the destination file within the specified bucket named:
    <time>_<uri>
  where <time> is a numeric prefix derived from the current time, <uri> is a
  random UUID, and the combined name is truncated to GCS_OBJECT_NAME_LENGTH
  characters.
Attributes:
bucket: string. The GCS bucket name to publish to.
gsutil_path: string. The path to the 'gsutil' tool.
"""
def __init__(self, bucket, gsutil_path='gsutil'):
self.bucket = bucket
self.gsutil_path = gsutil_path
def __repr__(self):
return '<{0} bucket="{1}">'.format(type(self).__name__, self.bucket)
def _GenerateObjectName(self):
object_name = str(int(time.time() * 100)) + '_' + str(uuid.uuid4())
return object_name[:GCS_OBJECT_NAME_LENGTH]
def PublishSamples(self, samples):
with vm_util.NamedTemporaryFile(prefix='perfkit-gcs-pub',
dir=vm_util.GetTempDir(),
suffix='.json') as tf:
json_publisher = NewlineDelimitedJSONPublisher(tf.name)
json_publisher.PublishSamples(samples)
tf.close()
object_name = self._GenerateObjectName()
storage_uri = 'gs://{0}/{1}'.format(self.bucket, object_name)
logging.info('Publishing %d samples to %s', len(samples), storage_uri)
copy_cmd = [self.gsutil_path, 'cp', tf.name, storage_uri]
vm_util.IssueRetryableCommand(copy_cmd)
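# Illustrative shape of the resulting copy command (the bucket and generated
# object name are made up):
#
#   gsutil cp /tmp/perfkit-gcs-pubXYZ.json gs://my-results-bucket/14161234567_3f2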
class SampleCollector(object):
"""A performance sample collector.
Supports incorporating additional metadata into samples, and publishing
results via any number of SamplePublishers.
Attributes:
samples: A list of Sample objects.
metadata_providers: A list of MetadataProvider objects. Metadata providers
to use. Defaults to DEFAULT_METADATA_PROVIDERS.
publishers: A list of SamplePublisher objects. If not specified, defaults to
a LogPublisher, PrettyPrintStreamPublisher, NewlineDelimitedJSONPublisher,
a BigQueryPublisher if FLAGS.bigquery_table is specified, and a
CloudStoragePublisher if FLAGS.cloud_storage_bucket is specified. See
SampleCollector._DefaultPublishers.
run_uri: A unique tag for the run.
"""
def __init__(self, metadata_providers=None, publishers=None):
self.samples = []
if metadata_providers is not None:
self.metadata_providers = metadata_providers
else:
self.metadata_providers = DEFAULT_METADATA_PROVIDERS
if publishers is not None:
self.publishers = publishers
else:
self.publishers = SampleCollector._DefaultPublishers()
logging.debug('Using publishers: {0}'.format(self.publishers))
@classmethod
def _DefaultPublishers(cls):
"""Gets a list of default publishers."""
publishers = [LogPublisher(), PrettyPrintStreamPublisher()]
default_json_path = vm_util.PrependTempDir(DEFAULT_JSON_OUTPUT_NAME)
publishers.append(NewlineDelimitedJSONPublisher(
FLAGS.json_path or default_json_path,
collapse_labels=FLAGS.collapse_labels))
if FLAGS.bigquery_table:
publishers.append(BigQueryPublisher(
FLAGS.bigquery_table,
project_id=FLAGS.bq_project,
bq_path=FLAGS.bq_path,
service_account=FLAGS.service_account,
service_account_private_key_file=FLAGS.service_account_private_key))
if FLAGS.cloud_storage_bucket:
publishers.append(CloudStoragePublisher(FLAGS.cloud_storage_bucket,
gsutil_path=FLAGS.gsutil_path))
return publishers
def AddSamples(self, samples, benchmark, benchmark_spec):
"""Adds data samples to the publisher.
Args:
samples: A list of Sample objects.
benchmark: string. The name of the benchmark.
benchmark_spec: BenchmarkSpec. Benchmark specification.
"""
for s in samples:
# Annotate the sample.
sample = dict(s.asdict())
sample['test'] = benchmark
for meta_provider in self.metadata_providers:
sample['metadata'] = meta_provider.AddMetadata(
sample['metadata'], benchmark_spec)
sample['product_name'] = FLAGS.product_name
sample['official'] = FLAGS.official
sample['owner'] = FLAGS.owner
sample['run_uri'] = benchmark_spec.uuid
sample['sample_uri'] = str(uuid.uuid4())
events.sample_created.send(benchmark_spec=benchmark_spec,
sample=sample)
self.samples.append(sample)
def PublishSamples(self):
"""Publish samples via all registered publishers."""
for publisher in self.publishers:
publisher.PublishSamples(self.samples)
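# Typical call pattern (the benchmark name and spec below are placeholders):
#
#   collector = SampleCollector()
#   collector.AddSamples(samples, 'iperf', benchmark_spec)
#   collector.PublishSamples()
#
# Every configured publisher then receives the fully annotated sample list
# exactly once.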
|
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import logging
import ntpath
import posixpath
from dashboard.common import histogram_helpers
from dashboard.pinpoint.models import errors
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models import task as task_module
from dashboard.pinpoint.models.quest import read_value as read_value_quest
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import run_test
from tracing.value import histogram_set
HistogramOptions = collections.namedtuple(
'HistogramOptions',
('grouping_label', 'story', 'statistic', 'histogram_name'))
GraphJsonOptions = collections.namedtuple('GraphJsonOptions',
('chart', 'trace'))
TaskOptions = collections.namedtuple(
'TaskOptions', ('test_options', 'benchmark', 'histogram_options',
'graph_json_options', 'mode'))
class CompleteReadValueAction(
collections.namedtuple('CompleteReadValueAction',
('job', 'task', 'state'))):
__slots__ = ()
def __str__(self):
return 'CompleteReadValueAction(job = %s, task = %s)' % (
self.job.job_id, self.task.id)
@task_module.LogStateTransitionFailures
def __call__(self, _):
task_module.UpdateTask(
self.job, self.task.id, new_state=self.state, payload=self.task.payload)
class ReadValueEvaluator(
collections.namedtuple('ReadValueEvaluator', ('job',))):
__slots__ = ()
def CompleteWithError(self, task, reason, message):
task.payload.update({
'tries':
task.payload.get('tries', 0) + 1,
'errors':
task.payload.get('errors', []) + [{
'reason': reason,
'message': message
}]
})
return [CompleteReadValueAction(self.job, task, 'failed')]
def __call__(self, task, _, accumulator):
# TODO(dberris): Validate!
# Outline:
# - Retrieve the data given the options.
# - Parse the data from the result file.
# - Update the status and payload with an action.
if task.status in {'completed', 'failed'}:
return None
dep = accumulator.get(task.dependencies[0], {})
isolate_server = dep.get('isolate_server')
isolate_hash = dep.get('isolate_hash')
dependency_status = dep.get('status', 'failed')
if dependency_status == 'failed':
return self.CompleteWithError(
task, 'DependencyFailed',
'Task dependency "%s" ended in failed status.' %
(task.dependencies[0],))
if dependency_status in {'pending', 'ongoing'}:
return None
try:
data = read_value_quest.RetrieveOutputJson(
isolate_server, isolate_hash, task.payload.get('results_filename'))
if task.payload.get('mode') == 'histogram_sets':
return self.HandleHistogramSets(task, data)
elif task.payload.get('mode') == 'graph_json':
return self.HandleGraphJson(task, data)
else:
return self.CompleteWithError(
task, 'UnsupportedMode',
('Pinpoint only currently supports reading '
'HistogramSets and GraphJSON formatted files.'))
except (errors.FatalError, errors.InformationalError,
errors.RecoverableError) as e:
return self.CompleteWithError(task, type(e).__name__, e.message)
def HandleHistogramSets(self, task, histogram_dicts):
histogram_options = task.payload.get('histogram_options', {})
grouping_label = histogram_options.get('grouping_label', '')
histogram_name = histogram_options.get('histogram_name')
story = histogram_options.get('story')
statistic = histogram_options.get('statistic')
histograms = histogram_set.HistogramSet()
histograms.ImportDicts(histogram_dicts)
histograms_by_path = read_value_quest.CreateHistogramSetByTestPathDict(
histograms)
histograms_by_path_optional_grouping_label = (
read_value_quest.CreateHistogramSetByTestPathDict(
histograms, ignore_grouping_label=True))
trace_urls = read_value_quest.FindTraceUrls(histograms)
test_paths_to_match = set([
histogram_helpers.ComputeTestPathFromComponents(
histogram_name, grouping_label=grouping_label, story_name=story),
histogram_helpers.ComputeTestPathFromComponents(
histogram_name,
grouping_label=grouping_label,
story_name=story,
needs_escape=False)
])
logging.debug('Test paths to match: %s', test_paths_to_match)
try:
result_values = read_value_quest.ExtractValuesFromHistograms(
test_paths_to_match, histograms_by_path, histogram_name,
grouping_label, story, statistic)
except errors.ReadValueNotFound:
result_values = read_value_quest.ExtractValuesFromHistograms(
test_paths_to_match, histograms_by_path_optional_grouping_label,
histogram_name, None, story, statistic)
logging.debug('Results: %s', result_values)
task.payload.update({
'result_values': result_values,
'tries': 1,
})
if trace_urls:
task.payload['trace_urls'] = [{
'key': 'trace',
'value': url['name'],
'url': url['url'],
} for url in trace_urls]
return [CompleteReadValueAction(self.job, task, 'completed')]
def HandleGraphJson(self, task, data):
chart = task.payload.get('graph_json_options', {}).get('chart', '')
trace = task.payload.get('graph_json_options', {}).get('trace', '')
if not chart and not trace:
task.payload.update({
'result_values': [],
'tries': task.payload.get('tries', 0) + 1
})
return [CompleteReadValueAction(self.job, task, 'completed')]
if chart not in data:
raise errors.ReadValueChartNotFound(chart)
if trace not in data[chart]['traces']:
raise errors.ReadValueTraceNotFound(trace)
task.payload.update({
'result_values': [float(data[chart]['traces'][trace][0])],
'tries': task.payload.get('tries', 0) + 1
})
return [CompleteReadValueAction(self.job, task, 'completed')]
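# Sketch of the GraphJSON shape this handler expects, inferred from the lookups
# above (chart and trace names are placeholders):
#
#   data = {
#       'warm_times': {
#           'traces': {'page_load_time': ['1234.5', '0.0']},
#       },
#   }
#
# With graph_json_options chart='warm_times' and trace='page_load_time', the
# task payload ends up with result_values == [1234.5].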
class Evaluator(evaluators.FilteringEvaluator):
def __init__(self, job):
super(Evaluator, self).__init__(
predicate=evaluators.All(
evaluators.TaskTypeEq('read_value'),
evaluators.TaskStatusIn({'pending'})),
delegate=evaluators.SequenceEvaluator(
evaluators=(evaluators.TaskPayloadLiftingEvaluator(),
ReadValueEvaluator(job))))
def ResultSerializer(task, _, accumulator):
results = accumulator.setdefault(task.id, {})
results.update({
'completed':
task.status in {'completed', 'failed', 'cancelled'},
'exception':
','.join(e.get('reason') for e in task.payload.get('errors', []))
or None,
'details': []
})
trace_urls = task.payload.get('trace_urls')
if trace_urls:
results['details'].extend(trace_urls)
class Serializer(evaluators.FilteringEvaluator):
def __init__(self):
super(Serializer, self).__init__(
predicate=evaluators.All(
evaluators.TaskTypeEq('read_value'),
evaluators.TaskStatusIn(
{'ongoing', 'failed', 'completed', 'cancelled'}),
),
delegate=ResultSerializer)
def CreateGraph(options):
if not isinstance(options, TaskOptions):
raise ValueError('options must be an instance of read_value.TaskOptions')
subgraph = run_test.CreateGraph(options.test_options)
path = None
if read_value_quest.IsWindows({'dimensions': options.test_options.dimensions
}):
path = ntpath.join(options.benchmark, 'perf_results.json')
else:
path = posixpath.join(options.benchmark, 'perf_results.json')
# We create a 1:1 mapping between a read_value task and a run_test task.
def GenerateVertexAndDep(attempts):
for attempt in range(attempts):
change_id = find_isolate.ChangeId(
options.test_options.build_options.change)
read_value_id = 'read_value_%s_%s' % (change_id, attempt)
run_test_id = run_test.TaskId(change_id, attempt)
yield (task_module.TaskVertex(
id=read_value_id,
vertex_type='read_value',
payload={
'benchmark': options.benchmark,
'mode': options.mode,
'results_filename': path,
'histogram_options': options.histogram_options._asdict(),
'graph_json_options': options.graph_json_options._asdict(),
'change': options.test_options.build_options.change.AsDict(),
'index': attempt,
}), task_module.Dependency(from_=read_value_id, to=run_test_id))
for vertex, edge in GenerateVertexAndDep(options.test_options.attempts):
subgraph.vertices.append(vertex)
subgraph.edges.append(edge)
return subgraph
|
|
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
from exceptions import ReferenceError
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary", 'WeakSet']
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
UserDict.UserDict.__init__(self, *args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen if the referent was garbage
                # collected after the lookup but before its entry was removed.
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
with _IterationGuard(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
with _IterationGuard(self):
for k in self.data.iterkeys():
yield k
__iter__ = iterkeys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.itervalues():
yield wr
def itervalues(self):
with _IterationGuard(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
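# Illustrative usage sketch: entries disappear once the value has no strong
# references left.
#
#   class Thing(object):
#       pass
#
#   d = WeakValueDictionary()
#   t = Thing()
#   d['a'] = t
#   'a' in d      # True while t is alive
#   del t         # once the Thing is collected, the 'a' entry is discarded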
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
with _IterationGuard(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.iterkeys():
yield wr
def iterkeys(self):
with _IterationGuard(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
__iter__ = iterkeys
def itervalues(self):
with _IterationGuard(self):
for value in self.data.itervalues():
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from divide import divide
import config
def kdtree(_input, path=None, treemap=None, parentmeta=None, parent=None):
    _input = list(_input)
    if treemap is None:
# root
treemap = {}
path = []
node = Node()
node.parent = parent
node.path = path
depth = len(path)
if len(_input) < 2:
node.leaf = True
node.overlap = False
node.content = _input
node.key = _input[0][1]
else:
_input2 = [[pos[depth % 2], pos[depth % 2 + 2]]
for pos, _, _ in _input] # column first
children = divide(zip(_input2, _input))
if len(children) < 2 and depth > 0:
node.leaf = True
node.overlap = True
node.content = _input
else:
node.leaf = False
node.children = []
for child, i in zip(children, range(len(children))):
children_path = path + [i]
_list = [i[1] for i in child]
node_child = kdtree(_list, path=children_path,
treemap=treemap, parent=node)
node.children.append(node_child)
if node_child.overlap:
node.overlap = True
if node.leaf:
for _, key, pos in node.content:
treemap[key] = node
children_pos = [pos for _, key, pos in node.content]
else:
children_pos = [_child.position for _child in node.children]
node.key = node.children[0].key
minx, miny = 10**6, 10**6
maxx, maxy = -10**6, -10**6
for pos in children_pos:
minx = min(minx, pos[0])
miny = min(miny, pos[1])
maxx = max(maxx, pos[2])
maxy = max(maxy, pos[3])
node.position = [minx, miny, maxx, maxy]
if 0 == depth:
_node = Node()
_node.position = [i for i in node.position]
_node.overlap = node.overlap
_node.path = [0]
_node.children = [node]
node.parent = _node
__node = Node()
__node.position = [i for i in node.position]
__node.overlap = node.overlap
__node.path = []
__node.children = [_node]
_node.parent = __node
return __node, treemap
else:
return node
def create_parent(node):
parent = Node()
parent.parent = node.parent
node.parent = parent
parent.overlap = node.overlap
parent.position = [i for i in node.position]
parent.path = node.path
parent.children = [node]
index = parent.parent.children.index(node)
parent.parent.children[index] = parent
return parent
def create_sibling(node):
    sibling = Node()
    sibling.parent = node.parent
    sibling.position = [i for i in node.position]
    i = node.parent.children.index(node)
    node.parent.children.insert(i + 1, sibling)
return sibling
class Node:
parent = None
path = None
children = None
key = None
leaf = None
overlap = None
position = None
content = None
modified = False
def __str__(self):
msg = self.path, self.leaf, self.overlap, self.position, self.content,
return ",".join([str(i) for i in msg])
def regularize(node, border):
x0, y0, x1, y1 = node.position
depth = len(node.path)
if node.leaf:
return
for child in node.children:
if len(node.path) % 2 == 0:
child.position[1], child.position[3] = y0, y1
else:
child.position[0], child.position[2] = x0, x1
if len(node.path) % 2 == 0:
i0, i1, b = x0, x1, border[0]
index0, index1 = 0, 2
else:
i0, i1, b = y0, y1, border[1]
index0, index1 = 1, 3
modified_by_user = False
modified_index = -1
for child, i in zip(node.children, range(len(node.children))):
if child.modified:
modified_index = i
modified_by_user = True
i0 = i0 - b
size = i1 - i0 - len(node.children) * b
size_sum = 0
for child, i in zip(node.children, range(len(node.children))):
        if i > modified_index or (1 + modified_index == len(node.children) and not child.modified):
size_sum += child.position[index1] - child.position[index0]
else:
size -= child.position[index1] - child.position[index0]
i = i0
for child, index in zip(node.children, range(len(node.children))):
i += b
if size == size_sum:
_size = child.position[index1] - child.position[index0]
elif modified_by_user:
assert len(node.children) > 1
_size = child.position[index1] - child.position[index0]
# if not child.modified:
if index > modified_index:
_size = _size * size / size_sum
_size = int(_size)
if modified_index + 1 == len(node.children) and not child.modified:
_size = _size * size / size_sum
_size = int(_size)
else:
_size = int(size) / len(node.children)
child.position[index0] = i
i += _size
child.position[index1] = i
node.children[-1].position[index1] = i1
for child, i in zip(node.children, range(len(node.children))):
child.path = node.path + [i]
regularize(child, border)
return
def remove_single_child_node(node):
if node.leaf:
return
l = []
if len(node.children) == 1:
child = node.children[0]
if not child.leaf:
            if node.parent is not None:
i = node.parent.children.index(node)
node.parent.children.remove(node)
for grandchild in child.children:
node.parent.children.insert(i, grandchild)
grandchild.parent = node.parent
i += 1
l = child.children
else:
l = node.children
for child in l:
remove_single_child_node(child)
def getLayoutAndKey(node, result=None, min_width=config.MIN_WINDOW_WIDTH, min_height=config.MIN_WINDOW_HEIGHT):
    if result is None:
        reach_size_limit = False
        result = [[], [], reach_size_limit]
if node.leaf:
x0, y0, x1, y1 = node.position
if x1 - x0 < min_width or y1 - y0 < min_height:
#("reach min size")
result[2]=True
return result
layout = [x0, y0, x1 - x0, y1 - y0]
result[0].append(layout)
result[1].append(node.key)
else:
for child in node.children:
getLayoutAndKey(child, result)
if result[2]:
return result
return result
|
|
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.exceptions import NotFound
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import url_for, current_app
from flask_sqlalchemy import SQLAlchemy
from .helpers import args_from_url
from .errors import ValidationError
from sqlalchemy import types
from sqlalchemy.databases import mysql
from marshmallow import Schema, fields, ValidationError, pre_load, validate
import uuid
db = SQLAlchemy()
class UUID(types.TypeDecorator):
impl = mysql.MSBinary
def __init__(self):
self.impl.length = 16
types.TypeDecorator.__init__(self,length=self.impl.length)
def process_bind_param(self,value,dialect=None):
if value and isinstance(value,uuid.UUID):
return value.bytes
elif value and not isinstance(value,uuid.UUID):
            raise ValueError('value %s is not a valid uuid.UUID' % value)
else:
return None
def process_result_value(self,value,dialect=None):
if value:
uniq_uuid = uuid.UUID(bytes=value)
return uniq_uuid
else:
return None
def is_mutable(self):
return False
class Properties(db.Model):
__tablename__ = 'properties'
id = db.Column(db.Integer, primary_key=True)
    key = db.Column(db.String, unique=True, index=True) # Property key; the corresponding value is stored in the 'value' column
value = db.Column(db.String, index=True)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class Hub(db.Model):
__tablename__ = 'hub'
id = db.Column(db.Integer, primary_key=True)
hub_id = db.Column(db.Integer, unique=True) # Hub ID internally populated by other script
hub_type = db.Column(db.Integer)
description = db.Column(db.String(255))
external_url = db.Column(db.String(255))
internal_url = db.Column(db.String(255))
    status = db.Column(db.Boolean, default=True) # Hub's status (Active = True, Inactive = False)
    activated_at = db.Column(db.DateTime) # When the Hub was activated; as of 17-Jan-16 this is when the record was created
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class HubTypes(db.Model):
__tablename__ = 'hub_types'
id = db.Column(db.Integer, primary_key=True)
hub_type = db.Column(db.Integer) # Section Types eg. 10=Switching, 11=TV remote, 12=Camera, etc
hub_type_desc = db.Column(db.String)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class SectionTypes(db.Model):
__tablename__ = 'section_types'
id = db.Column(db.Integer, primary_key=True)
section_type = db.Column(db.Integer) # Section Types eg. 10=House:Living Room, 11=Kitchen, 12=Bedroom, 13=Bathroom etc,
section_type_desc = db.Column(db.String)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class EndpointTypes(db.Model):
__tablename__ = 'endpoint_types'
id = db.Column(db.Integer, primary_key=True)
node_type = db.Column(db.Integer) # Node Types eg. 10=Webswitch, 11=TouchPanel, 12=TV, 13=Music, 14=AC
node_type_desc = db.Column(db.String)
node_category = db.Column(db.String, default = 'simple') # This field marks if the Node is of complex or simple type
endpoint_type = db.Column(db.Integer) # Endpoint types depends on NodeTypes eg. NodeType 10 - 10=Switch 11=Dimmer
endpoint_type_desc = db.Column(db.String)
status_min = db.Column(db.Integer)
status_max = db.Column(db.Integer)
method = db.Column(db.String)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class Endpoint(db.Model):
__tablename__ = 'endpoint'
id = db.Column(db.Integer, primary_key=True)
internal_sec_id = db.Column(db.Integer) # Defines the Room/Section number that has been given during Installation, this is what is considered during operation
section_type = db.Column(db.Integer) # Section Types eg. 10=House:Living Room, 11=Kitchen, 12=Bedroom, 13=Bathroom etc,
internal_sec_desc = db.Column(db.String(255), index=True)
internal_nod_id = db.Column(db.Integer) # Defines the Node number that has been given during Installation, this is what is considered during operation
node_type = db.Column(db.Integer) # Node Types eg. 10=Webswitch, 11=TouchPanel, 12=TV, 13=Music, 14=AC
internal_nod_desc = db.Column(db.String(255), index=True)
internal_end_id = db.Column(db.Integer) # Defines the Endpoint number that has been given during Installation, this is what is considered during operation
endpoint_type = db.Column(db.Integer) # Endpoint types depends on NodeTypes eg. NodeType 10 - 10=Switch 11=Dimmer
endpoint_uuid = db.Column(UUID(), default = uuid.uuid4, index=True)
internal_end_desc = db.Column(db.String(255), index=True)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class WSNodeStatus(db.Model):
__tablename__ = 'ws_node_status'
internal_nod_id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.Integer)
class EndpointStatus(db.Model):
__tablename__ = 'endpoint_status'
endpoint_uuid = db.Column(UUID(), primary_key=True)
status = db.Column(db.Integer)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class EndpointGroup(db.Model):
__tablename__ = 'endpoint_group'
group_uuid = db.Column(UUID(), primary_key=True, default = uuid.uuid4)
endpoint_uuid = db.Column(UUID(), primary_key=True)
group_desc = db.Column(db.String)
expected_status = db.Column(db.Integer)
last_changed_by = db.Column(db.String)
last_changed_on = db.Column(db.DateTime)
class Schedule(db.Model):
__tablename__ = 'schedule'
id = db.Column(db.Integer, primary_key=True)
uuid_id = db.Column(UUID(), index=True) # UUID = Endpoint UUID or Group UUID
expected_status = db.Column(db.Integer) # Expected Status when scheduler value is true
year = db.Column(db.Integer)
month = db.Column(db.Integer)
weekday = db.Column(db.Integer)
date = db.Column(db.Integer)
hour = db.Column(db.Integer)
min = db.Column(db.Integer)
hourly = db.Column(db.Boolean, default = False)
daily = db.Column(db.Boolean, default = False)
weekly = db.Column(db.Boolean, default = False)
monthly = db.Column(db.Boolean, default = False)
yearly = db.Column(db.Boolean, default = False)
onlyonce = db.Column(db.Boolean, default = True)
    status = db.Column(db.Boolean, default=True) # Schedule status (Active = True, Inactive = False)
last_changed_by = db.Column(db.String)
    last_changed_on = db.Column(db.DateTime) # When the schedule record was last changed
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(), index=True)
group = db.Column(db.String(), default = 'USER')
password_hash = db.Column(db.String(128))
mobile_no = db.Column(db.Integer)
email = db.Column(db.String)
login_date = db.Column(db.DateTime)
created_date = db.Column(db.DateTime)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_auth_token(self, expires_in=600):
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expires_in)
return s.dumps({'id': self.id}).decode('utf-8')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
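    # Usage sketch (assumes a Flask app context with SECRET_KEY configured):
    #
    #   token = user.generate_auth_token(expires_in=600)
    #   User.verify_auth_token(token)   # returns the same User, or None if the
    #                                   # token is invalid or has expired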
##### SCHEMAS #####
# Custom validator
def must_not_be_blank(data):
if not data:
raise ValidationError('Data not provided.')
class PropertiesSchema(Schema):
id = fields.Int(dump_only=True)
key = fields.Str()
value = fields.Str()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class HubSchema(Schema):
id = fields.Int(dump_only=True)
hub_id = fields.Int()
hub_type = fields.Int()
description = fields.Str()
external_url = fields.Str()
internal_url = fields.Str()
status = fields.Bool()
activated_at = fields.DateTime(dump_only=True)
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class HubTypesSchema(Schema):
id = fields.Int(dump_only=True)
hub_type = fields.Number(validate=lambda n: 10 <= n <= 99)
hub_type_desc = fields.Str()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class SectionTypesSchema(Schema):
id = fields.Int(dump_only=True)
section_type = fields.Number(validate=lambda n: 10 <= n <= 99)
section_type_desc = fields.Str()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class EndpointTypesSchema(Schema):
id = fields.Int(dump_only=True)
node_type = fields.Number(validate=lambda n: 10 <= n <= 99)
node_type_desc = fields.Str()
node_category = fields.Str()
endpoint_type = fields.Number(validate=lambda n: 1000 <= n <= 9999)
endpoint_type_desc = fields.Str()
status_min = fields.Int()
status_max = fields.Int()
method = fields.Str()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class EndpointSchema(Schema):
id = fields.Int(dump_only=True)
internal_sec_id = fields.Int(required=True)
section_type = fields.Number(validate=lambda n: 10 <= n <= 99)
internal_sec_desc = fields.Str()
internal_nod_id = fields.Int(required=True)
node_type = fields.Number(validate=lambda n: 10 <= n <= 99)
internal_nod_desc = fields.Str()
internal_end_id = fields.Int(required=True)
endpoint_type = fields.Number(validate=lambda n: 1000 <= n <= 9999)
endpoint_uuid = fields.UUID(dump_only=True)
internal_end_desc = fields.Str()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class EndpointStatusSchema(Schema):
endpoint_uuid = fields.UUID()
status = fields.Int()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class EndpointGroupSchema(Schema):
group_uuid = fields.UUID(dump_only=True)
endpoint_uuid = fields.UUID()
group_desc = fields.Str(required=True)
expected_status = fields.Int()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class ScheduleSchema(Schema):
    id = fields.Int(dump_only=True)
uuid_id = fields.UUID()
expected_status = fields.Int() # Expected Status when scheduler value is true
year = fields.Int()
month = fields.Int()
weekday = fields.Int()
date = fields.Int()
hour = fields.Int()
min = fields.Int()
hourly = fields.Bool()
daily = fields.Bool()
weekly = fields.Bool()
monthly = fields.Bool()
yearly = fields.Bool()
onlyonce = fields.Bool()
status = fields.Bool()
last_changed_by = fields.Str(validate=[validate.Length(max=64)])
last_changed_on = fields.DateTime(dump_only=True)
class UserSchema(Schema):
id = fields.Int(dump_only=True)
username = fields.Str(validate=[validate.Length(max=64)])
group = fields.Str(validate=[validate.Length(max=50)])
password = fields.Str(validate=[validate.Length(min=6, max=36)],load_only=True)
mobile_no = fields.Int()
email = fields.Str()
login_date = fields.DateTime()
created_date = fields.DateTime()
# properties_schema = PropertiesSchema(exclude=('id',))
properties_schema = PropertiesSchema()
properties_schemas = PropertiesSchema(many=True)
hub_schema = HubSchema()
section_types_schema = SectionTypesSchema()
section_types_schemas = SectionTypesSchema(many=True)
endpoint_types_schema = EndpointTypesSchema()
endpoint_types_schemas = EndpointTypesSchema(many=True)
endpoint_schema = EndpointSchema()
endpoint_schemas = EndpointSchema(many=True)
endpoint_status_schema = EndpointStatusSchema()
endpoint_status_schemas = EndpointStatusSchema(many=True)
endpoint_group_schema = EndpointGroupSchema()
endpoint_group_schemas = EndpointGroupSchema(many=True)
schedule_schema = ScheduleSchema()
schedule_schemas = ScheduleSchema(many=True)
user_schema = UserSchema()
user_schemas = UserSchema(many=True)
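# Usage sketch for the schema objects above (return shapes depend on the
# installed marshmallow version: 2.x returns (data, errors) pairs, while 3.x
# returns the data directly and raises on validation errors):
#
#   result = endpoint_schema.dump(some_endpoint)    # model instance -> dict
#   parsed = endpoint_schema.load(request_json)     # incoming dict -> validated data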
|
|
"""Test config validators."""
from collections import OrderedDict
from datetime import date, datetime, timedelta
import enum
import os
from socket import _GLOBAL_DEFAULT_TIMEOUT
from unittest.mock import Mock, patch
import uuid
import pytest
import voluptuous as vol
import homeassistant
from homeassistant.helpers import config_validation as cv, template
def test_boolean():
"""Test boolean validation."""
schema = vol.Schema(cv.boolean)
for value in (
None,
"T",
"negative",
"lock",
"tr ue",
[],
[1, 2],
{"one": "two"},
test_boolean,
):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ("true", "On", "1", "YES", " true ", "enable", 1, 50, True, 0.1):
assert schema(value)
for value in ("false", "Off", "0", "NO", "disable", 0, False):
assert not schema(value)
def test_latitude():
"""Test latitude validation."""
schema = vol.Schema(cv.latitude)
for value in ("invalid", None, -91, 91, "-91", "91", "123.01A"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ("-89", 89, "12.34"):
schema(value)
def test_longitude():
"""Test longitude validation."""
schema = vol.Schema(cv.longitude)
for value in ("invalid", None, -181, 181, "-181", "181", "123.01A"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ("-179", 179, "12.34"):
schema(value)
def test_port():
"""Test TCP/UDP network port."""
schema = vol.Schema(cv.port)
for value in ("invalid", None, -1, 0, 80000, "81000"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ("1000", 21, 24574):
schema(value)
def test_isfile():
"""Validate that the value is an existing file."""
schema = vol.Schema(cv.isfile)
fake_file = "this-file-does-not.exist"
assert not os.path.isfile(fake_file)
for value in ("invalid", None, -1, 0, 80000, fake_file):
with pytest.raises(vol.Invalid):
schema(value)
# patching methods that allow us to fake a file existing
# with write access
with patch("os.path.isfile", Mock(return_value=True)), patch(
"os.access", Mock(return_value=True)
):
schema("test.txt")
def test_url():
"""Test URL."""
schema = vol.Schema(cv.url)
for value in (
"invalid",
None,
100,
"htp://ha.io",
"http//ha.io",
"http://??,**",
"https://??,**",
):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in (
"http://localhost",
"https://localhost/test/index.html",
"http://home-assistant.io",
"http://home-assistant.io/test/",
"https://community.home-assistant.io/",
):
assert schema(value)
def test_platform_config():
"""Test platform config validation."""
options = ({}, {"hello": "world"})
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.PLATFORM_SCHEMA(value)
options = ({"platform": "mqtt"}, {"platform": "mqtt", "beer": "yes"})
for value in options:
cv.PLATFORM_SCHEMA_BASE(value)
def test_ensure_list():
"""Test ensure_list."""
schema = vol.Schema(cv.ensure_list)
assert [] == schema(None)
assert [1] == schema(1)
assert [1] == schema([1])
assert ["1"] == schema("1")
assert ["1"] == schema(["1"])
assert [{"1": "2"}] == schema({"1": "2"})
def test_entity_id():
"""Test entity ID validation."""
schema = vol.Schema(cv.entity_id)
with pytest.raises(vol.MultipleInvalid):
schema("invalid_entity")
assert schema("sensor.LIGHT") == "sensor.light"
def test_entity_ids():
"""Test entity ID validation."""
schema = vol.Schema(cv.entity_ids)
options = (
"invalid_entity",
"sensor.light,sensor_invalid",
["invalid_entity"],
["sensor.light", "sensor_invalid"],
["sensor.light,sensor_invalid"],
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = ([], ["sensor.light"], "sensor.light")
for value in options:
schema(value)
assert schema("sensor.LIGHT, light.kitchen ") == ["sensor.light", "light.kitchen"]
def test_entity_domain():
"""Test entity domain validation."""
schema = vol.Schema(cv.entity_domain("sensor"))
for value in (
"invalid_entity",
"cover.demo",
"cover.demo,sensor.another_entity",
"",
):
with pytest.raises(vol.MultipleInvalid):
schema(value)
assert schema("sensor.LIGHT") == "sensor.light"
schema = vol.Schema(cv.entity_domain(("sensor", "binary_sensor")))
for value in ("invalid_entity", "cover.demo"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
assert schema("sensor.LIGHT") == "sensor.light"
assert schema("binary_sensor.LIGHT") == "binary_sensor.light"
def test_entities_domain():
"""Test entities domain validation."""
schema = vol.Schema(cv.entities_domain("sensor"))
options = (
None,
"",
"invalid_entity",
["sensor.light", "cover.demo"],
["sensor.light", "sensor_invalid"],
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = ("sensor.light", ["SENSOR.light"], ["sensor.light", "sensor.demo"])
for value in options:
schema(value)
assert schema("sensor.LIGHT, sensor.demo ") == ["sensor.light", "sensor.demo"]
assert schema(["sensor.light", "SENSOR.demo"]) == ["sensor.light", "sensor.demo"]
def test_ensure_list_csv():
"""Test ensure_list_csv."""
schema = vol.Schema(cv.ensure_list_csv)
options = (None, 12, [], ["string"], "string1,string2")
for value in options:
schema(value)
assert schema("string1, string2 ") == ["string1", "string2"]
def test_event_schema():
"""Test event_schema validation."""
options = (
{},
None,
{"event_data": {}},
{"event": "state_changed", "event_data": 1},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.EVENT_SCHEMA(value)
options = (
{"event": "state_changed"},
{"event": "state_changed", "event_data": {"hello": "world"}},
)
for value in options:
cv.EVENT_SCHEMA(value)
def test_icon():
"""Test icon validation."""
schema = vol.Schema(cv.icon)
for value in (False, "work"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
schema("mdi:work")
schema("custom:prefix")
def test_time_period():
"""Test time_period validation."""
schema = vol.Schema(cv.time_period)
options = (
None,
"",
"hello:world",
"12:",
"12:34:56:78",
{},
{"wrong_key": -10},
"12.5:30",
"12:30.5",
"12.5:30:30",
"12:30.5:30",
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
("8:20", timedelta(hours=8, minutes=20)),
("23:59", timedelta(hours=23, minutes=59)),
("-8:20", -1 * timedelta(hours=8, minutes=20)),
("-1:15", -1 * timedelta(hours=1, minutes=15)),
("-23:59:59", -1 * timedelta(hours=23, minutes=59, seconds=59)),
("-48:00", -1 * timedelta(days=2)),
({"minutes": 5}, timedelta(minutes=5)),
(1, timedelta(seconds=1)),
("5", timedelta(seconds=5)),
("180", timedelta(seconds=180)),
("00:08:20.5", timedelta(minutes=8, seconds=20, milliseconds=500)),
("00:23:59.999", timedelta(minutes=23, seconds=59, milliseconds=999)),
("-00:08:20.5", -1 * timedelta(minutes=8, seconds=20, milliseconds=500)),
(
"-12:59:59.999",
-1 * timedelta(hours=12, minutes=59, seconds=59, milliseconds=999),
),
({"milliseconds": 1.5}, timedelta(milliseconds=1, microseconds=500)),
({"seconds": "1.5"}, timedelta(seconds=1, milliseconds=500)),
({"minutes": "1.5"}, timedelta(minutes=1, seconds=30)),
({"hours": -1.5}, -1 * timedelta(hours=1, minutes=30)),
({"days": "-1.5"}, -1 * timedelta(days=1, hours=12)),
)
for value, result in options:
assert schema(value) == result
def test_remove_falsy():
"""Test remove falsy."""
assert cv.remove_falsy([0, None, 1, "1", {}, [], ""]) == [1, "1"]
def test_service():
"""Test service validation."""
schema = vol.Schema(cv.service)
with pytest.raises(vol.MultipleInvalid):
schema("invalid_turn_on")
schema("homeassistant.turn_on")
def test_service_schema():
"""Test service_schema validation."""
options = (
{},
None,
{
"service": "homeassistant.turn_on",
"service_template": "homeassistant.turn_on",
},
{"data": {"entity_id": "light.kitchen"}},
{"service": "homeassistant.turn_on", "data": None},
{
"service": "homeassistant.turn_on",
"data_template": {"brightness": "{{ no_end"},
},
)
for value in options:
with pytest.raises(vol.MultipleInvalid):
cv.SERVICE_SCHEMA(value)
options = (
{"service": "homeassistant.turn_on"},
{"service": "homeassistant.turn_on", "entity_id": "light.kitchen"},
{"service": "light.turn_on", "entity_id": "all"},
{
"service": "homeassistant.turn_on",
"entity_id": ["light.kitchen", "light.ceiling"],
},
{
"service": "light.turn_on",
"entity_id": "all",
"alias": "turn on kitchen lights",
},
)
for value in options:
cv.SERVICE_SCHEMA(value)
def test_slug():
"""Test slug validation."""
schema = vol.Schema(cv.slug)
for value in (None, "hello world"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in (12345, "hello"):
schema(value)
def test_string(hass):
"""Test string validation."""
schema = vol.Schema(cv.string)
with pytest.raises(vol.Invalid):
schema(None)
with pytest.raises(vol.Invalid):
schema([])
with pytest.raises(vol.Invalid):
schema({})
for value in (True, 1, "hello"):
schema(value)
# Test template support
for text, native in (
("[1, 2]", [1, 2]),
("{1, 2}", {1, 2}),
("(1, 2)", (1, 2)),
('{"hello": True}', {"hello": True}),
):
tpl = template.Template(text, hass)
result = tpl.async_render()
assert isinstance(result, template.ResultWrapper)
assert result == native
assert schema(result) == text
def test_string_with_no_html():
"""Test string with no html validation."""
schema = vol.Schema(cv.string_with_no_html)
with pytest.raises(vol.Invalid):
schema("This has HTML in it <a>Link</a>")
with pytest.raises(vol.Invalid):
schema("<b>Bold</b>")
for value in (
True,
3,
"Hello",
"**Hello**",
"This has no HTML [Link](https://home-assistant.io)",
):
schema(value)
def test_temperature_unit():
"""Test temperature unit validation."""
schema = vol.Schema(cv.temperature_unit)
with pytest.raises(vol.MultipleInvalid):
schema("K")
schema("C")
schema("F")
def test_x10_address():
"""Test x10 addr validator."""
schema = vol.Schema(cv.x10_address)
with pytest.raises(vol.Invalid):
schema("Q1")
schema("q55")
schema("garbage_addr")
schema("a1")
schema("C11")
def test_template():
"""Test template validator."""
schema = vol.Schema(cv.template)
for value in (None, "{{ partial_print }", "{% if True %}Hello", ["test"]):
with pytest.raises(vol.Invalid):
schema(value)
options = (
1,
"Hello",
"{{ beer }}",
"{% if 1 == 1 %}Hello{% else %}World{% endif %}",
)
for value in options:
schema(value)
def test_dynamic_template():
"""Test dynamic template validator."""
schema = vol.Schema(cv.dynamic_template)
for value in (
None,
1,
"{{ partial_print }",
"{% if True %}Hello",
["test"],
"just a string",
):
with pytest.raises(vol.Invalid):
schema(value)
options = (
"{{ beer }}",
"{% if 1 == 1 %}Hello{% else %}World{% endif %}",
)
for value in options:
schema(value)
def test_template_complex():
"""Test template_complex validator."""
schema = vol.Schema(cv.template_complex)
for value in ("{{ partial_print }", "{% if True %}Hello"):
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = (
1,
"Hello",
"{{ beer }}",
"{% if 1 == 1 %}Hello{% else %}World{% endif %}",
{"test": 1, "test2": "{{ beer }}"},
["{{ beer }}", 1],
)
for value in options:
schema(value)
# ensure the validator didn't mutate the input
assert options == (
1,
"Hello",
"{{ beer }}",
"{% if 1 == 1 %}Hello{% else %}World{% endif %}",
{"test": 1, "test2": "{{ beer }}"},
["{{ beer }}", 1],
)
# Ensure we don't mutate non-string types that cannot be templates.
for value in (1, True, None):
assert schema(value) == value
def test_time_zone():
"""Test time zone validation."""
schema = vol.Schema(cv.time_zone)
with pytest.raises(vol.MultipleInvalid):
schema("America/Do_Not_Exist")
schema("America/Los_Angeles")
schema("UTC")
def test_date():
"""Test date validation."""
schema = vol.Schema(cv.date)
for value in ["Not a date", "23:42", "2016-11-23T18:59:08"]:
with pytest.raises(vol.Invalid):
schema(value)
schema(datetime.now().date())
schema("2016-11-23")
def test_time():
"""Test date validation."""
schema = vol.Schema(cv.time)
for value in ["Not a time", "2016-11-23", "2016-11-23T18:59:08"]:
with pytest.raises(vol.Invalid):
schema(value)
schema(datetime.now().time())
schema("23:42:00")
schema("23:42")
def test_datetime():
"""Test date time validation."""
schema = vol.Schema(cv.datetime)
for value in [date.today(), "Wrong DateTime"]:
with pytest.raises(vol.MultipleInvalid):
schema(value)
schema(datetime.now())
schema("2016-11-23T18:59:08")
def test_multi_select():
"""Test multi select validation.
Expected behavior:
- Will not accept any input but a list
- Will not accept selections outside of configured scope
"""
schema = vol.Schema(cv.multi_select({"paulus": "Paulus", "robban": "Robban"}))
with pytest.raises(vol.Invalid):
schema("robban")
schema(["paulus", "martinhj"])
schema(["robban", "paulus"])
def test_multi_select_in_serializer():
"""Test multi_select with custom_serializer."""
assert cv.custom_serializer(cv.multi_select({"paulus": "Paulus"})) == {
"type": "multi_select",
"options": {"paulus": "Paulus"},
}
def test_boolean_in_serializer():
"""Test boolean with custom_serializer."""
assert cv.custom_serializer(cv.boolean) == {
"type": "boolean",
}
def test_string_in_serializer():
"""Test string with custom_serializer."""
assert cv.custom_serializer(cv.string) == {
"type": "string",
}
def test_positive_time_period_dict_in_serializer():
"""Test positive_time_period_dict with custom_serializer."""
assert cv.custom_serializer(cv.positive_time_period_dict) == {
"type": "positive_time_period_dict",
}
@pytest.fixture
def schema():
"""Create a schema used for testing deprecation."""
return vol.Schema({"venus": cv.boolean, "mars": cv.boolean, "jupiter": cv.boolean})
@pytest.fixture
def version(monkeypatch):
"""Patch the version used for testing to 0.5.0."""
monkeypatch.setattr(homeassistant.const, "__version__", "0.5.0")
def test_deprecated_with_no_optionals(caplog, schema):
"""
Test deprecation behaves correctly when optional params are None.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema without changing any values
- No warning or difference in output if key is not provided
"""
deprecated_schema = vol.All(cv.deprecated("mars"), schema)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name in [
__name__,
"homeassistant.helpers.config_validation",
]
assert (
"The 'mars' option is deprecated, please remove it from your configuration"
) in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
def test_deprecated_with_replacement_key(caplog, schema):
"""
Test deprecation behaves correctly when only a replacement key is provided.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning or difference in output if neither key nor
replacement_key are provided
"""
deprecated_schema = vol.All(
cv.deprecated("mars", replacement_key="jupiter"), schema
)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
assert {"jupiter": True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"jupiter": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
def test_deprecated_with_default(caplog, schema):
"""
Test deprecation behaves correctly with a default value.
This is likely a scenario that would never occur.
Expected behavior:
- Behaves identically as when the default value was not present
"""
deprecated_schema = vol.All(cv.deprecated("mars", default=False), schema)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name == __name__
assert (
"The 'mars' option is deprecated, please remove it from your configuration"
) in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
def test_deprecated_with_replacement_key_and_default(caplog, schema):
"""
Test deprecation with a replacement key and default.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning if neither key nor replacement_key are provided
- Adds replacement_key with default value in this case
"""
deprecated_schema = vol.All(
cv.deprecated("mars", replacement_key="jupiter", default=False), schema
)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
assert {"jupiter": True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"jupiter": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert {"venus": True, "jupiter": False} == output
deprecated_schema_with_default = vol.All(
vol.Schema(
{
"venus": cv.boolean,
vol.Optional("mars", default=False): cv.boolean,
vol.Optional("jupiter", default=False): cv.boolean,
}
),
cv.deprecated("mars", replacement_key="jupiter", default=False),
)
test_data = {"mars": True}
output = deprecated_schema_with_default(test_data.copy())
assert len(caplog.records) == 1
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
assert {"jupiter": True} == output
def test_deprecated_cant_find_module():
"""Test if the current module cannot be inspected."""
with patch("inspect.getmodule", return_value=None):
# This used to raise.
cv.deprecated(
"mars",
replacement_key="jupiter",
default=False,
)
def test_deprecated_logger_with_config_attributes(caplog):
"""Test if the logger outputs the correct message if the line and file attribute is available in config."""
file: str = "configuration.yaml"
line: int = 54
replacement = f"'mars' option near {file}:{line} is deprecated"
config = OrderedDict([("mars", "blah")])
setattr(config, "__config_file__", file)
setattr(config, "__line__", line)
cv.deprecated("mars", replacement_key="jupiter", default=False)(config)
assert len(caplog.records) == 1
assert replacement in caplog.text
caplog.clear()
assert len(caplog.records) == 0
def test_deprecated_logger_with_one_config_attribute(caplog):
"""Test if the logger outputs the correct message if only one of line and file attribute is available in config."""
file: str = "configuration.yaml"
line: int = 54
replacement = f"'mars' option near {file}:{line} is deprecated"
config = OrderedDict([("mars", "blah")])
setattr(config, "__config_file__", file)
cv.deprecated("mars", replacement_key="jupiter", default=False)(config)
assert len(caplog.records) == 1
assert replacement not in caplog.text
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
caplog.clear()
assert len(caplog.records) == 0
config = OrderedDict([("mars", "blah")])
setattr(config, "__line__", line)
cv.deprecated("mars", replacement_key="jupiter", default=False)(config)
assert len(caplog.records) == 1
assert replacement not in caplog.text
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
caplog.clear()
assert len(caplog.records) == 0
def test_deprecated_logger_without_config_attributes(caplog):
"""Test if the logger outputs the correct message if the line and file attribute is not available in config."""
file: str = "configuration.yaml"
line: int = 54
replacement = f"'mars' option near {file}:{line} is deprecated"
config = OrderedDict([("mars", "blah")])
cv.deprecated("mars", replacement_key="jupiter", default=False)(config)
assert len(caplog.records) == 1
assert replacement not in caplog.text
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
caplog.clear()
assert len(caplog.records) == 0
def test_key_dependency():
"""Test key_dependency validator."""
schema = vol.Schema(cv.key_dependency("beer", "soda"))
options = {"beer": None}
for value in options:
with pytest.raises(vol.MultipleInvalid):
schema(value)
options = ({"beer": None, "soda": None}, {"soda": None}, {})
for value in options:
schema(value)
def test_has_at_most_one_key():
"""Test has_at_most_one_key validator."""
schema = vol.Schema(cv.has_at_most_one_key("beer", "soda"))
for value in (None, [], {"beer": None, "soda": None}):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ({}, {"beer": None}, {"soda": None}):
schema(value)
def test_has_at_least_one_key():
"""Test has_at_least_one_key validator."""
schema = vol.Schema(cv.has_at_least_one_key("beer", "soda"))
for value in (None, [], {}, {"wine": None}):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in ({"beer": None}, {"soda": None}):
schema(value)
def test_enum():
"""Test enum validator."""
class TestEnum(enum.Enum):
"""Test enum."""
value1 = "Value 1"
value2 = "Value 2"
schema = vol.Schema(cv.enum(TestEnum))
with pytest.raises(vol.Invalid):
schema("value3")
def test_socket_timeout(): # pylint: disable=invalid-name
"""Test socket timeout validator."""
schema = vol.Schema(cv.socket_timeout)
with pytest.raises(vol.Invalid):
schema(0.0)
with pytest.raises(vol.Invalid):
schema(-1)
assert schema(None) == _GLOBAL_DEFAULT_TIMEOUT
assert schema(1) == 1.0
def test_matches_regex():
"""Test matches_regex validator."""
schema = vol.Schema(cv.matches_regex(".*uiae.*"))
with pytest.raises(vol.Invalid):
schema(1.0)
with pytest.raises(vol.Invalid):
schema(" nrtd ")
test_str = "This is a test including uiae."
assert schema(test_str) == test_str
def test_is_regex():
"""Test the is_regex validator."""
schema = vol.Schema(cv.is_regex)
with pytest.raises(vol.Invalid):
schema("(")
with pytest.raises(vol.Invalid):
schema({"a dict": "is not a regex"})
valid_re = ".*"
schema(valid_re)
def test_comp_entity_ids():
"""Test config validation for component entity IDs."""
schema = vol.Schema(cv.comp_entity_ids)
for valid in (
"ALL",
"all",
"AlL",
"light.kitchen",
["light.kitchen"],
["light.kitchen", "light.ceiling"],
[],
):
schema(valid)
for invalid in (["light.kitchen", "not-entity-id"], "*", ""):
with pytest.raises(vol.Invalid):
schema(invalid)
def test_uuid4_hex(caplog):
"""Test uuid validation."""
schema = vol.Schema(cv.uuid4_hex)
for value in ["Not a hex string", "0", 0]:
with pytest.raises(vol.Invalid):
schema(value)
with pytest.raises(vol.Invalid):
# the 13th char should be 4
schema("a03d31b22eee1acc9b90eec40be6ed23")
with pytest.raises(vol.Invalid):
# the 17th char should be 8-a
schema("a03d31b22eee4acc7b90eec40be6ed23")
_hex = uuid.uuid4().hex
assert schema(_hex) == _hex
assert schema(_hex.upper()) == _hex
def test_key_value_schemas():
"""Test key value schemas."""
schema = vol.Schema(
cv.key_value_schemas(
"mode",
{
"number": vol.Schema({"mode": "number", "data": int}),
"string": vol.Schema({"mode": "string", "data": str}),
},
)
)
with pytest.raises(vol.Invalid) as excinfo:
schema(True)
assert str(excinfo.value) == "Expected a dictionary"
for mode in None, "invalid":
with pytest.raises(vol.Invalid) as excinfo:
schema({"mode": mode})
assert (
str(excinfo.value)
== f"Unexpected value for mode: '{mode}'. Expected number, string"
)
with pytest.raises(vol.Invalid) as excinfo:
schema({"mode": "number", "data": "string-value"})
assert str(excinfo.value) == "expected int for dictionary value @ data['data']"
with pytest.raises(vol.Invalid) as excinfo:
schema({"mode": "string", "data": 1})
assert str(excinfo.value) == "expected str for dictionary value @ data['data']"
for mode, data in (("number", 1), ("string", "hello")):
schema({"mode": mode, "data": data})
def test_script(caplog):
"""Test script validation is user friendly."""
for data, msg in (
({"delay": "{{ invalid"}, "should be format 'HH:MM'"),
({"wait_template": "{{ invalid"}, "invalid template"),
({"condition": "invalid"}, "Unexpected value for condition: 'invalid'"),
({"event": None}, "string value is None for dictionary value @ data['event']"),
(
{"device_id": None},
"string value is None for dictionary value @ data['device_id']",
),
(
{"scene": "light.kitchen"},
"Entity ID 'light.kitchen' does not belong to domain 'scene'",
),
):
with pytest.raises(vol.Invalid) as excinfo:
cv.script_action(data)
assert msg in str(excinfo.value)
def test_whitespace():
"""Test whitespace validation."""
schema = vol.Schema(cv.whitespace)
for value in (
None,
"" "T",
"negative",
"lock",
"tr ue",
[],
[1, 2],
{"one": "two"},
):
with pytest.raises(vol.MultipleInvalid):
schema(value)
for value in (" ", " "):
assert schema(value)
|
|
#
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""SCRAM authentication mechanisms for PyXMPP SASL implementation.
Normative reference:
- :RFC:`5802`
"""
from __future__ import absolute_import, division, unicode_literals
__docformat__ = "restructuredtext en"
import sys
import re
import logging
import hashlib
import hmac
from binascii import a2b_base64
from base64 import standard_b64encode
from .core import default_nonce_factory
from .exceptions import BadChallengeException, \
ExtraChallengeException, ServerScramError, BadSuccessException, \
NotAuthorizedException
logger = logging.getLogger("pyxmpp2_scram")
HASH_FACTORIES = {
"SHA-1": hashlib.sha1, # pylint: disable=E1101
"SHA-224": hashlib.sha224, # pylint: disable=E1101
"SHA-256": hashlib.sha256, # pylint: disable=E1101
"SHA-384": hashlib.sha384, # pylint: disable=E1101
"SHA-512": hashlib.sha512, # pylint: disable=E1101
"MD-5": hashlib.md5, # pylint: disable=E1101
}
VALUE_CHARS_RE = re.compile(br"^[\x21-\x2B\x2D-\x7E]+$")
_QUOTED_VALUE_RE = br"(?:[\x21-\x2B\x2D-\x7E]|=2C|=3D)+"
CLIENT_FIRST_MESSAGE_RE = re.compile(
br"^(?P<gs2_header>(?:y|n|p=(?P<cb_name>[a-zA-z0-9.-]+)),"
br"(?:a=(?P<authzid>" + _QUOTED_VALUE_RE + br"))?,)"
br"(?P<client_first_bare>(?P<mext>m=[^\000=]+,)?"
br"n=(?P<username>" + _QUOTED_VALUE_RE + br"),"
br"r=(?P<nonce>[\x21-\x2B\x2D-\x7E]+)"
br"(?:,.*)?)$"
)
SERVER_FIRST_MESSAGE_RE = re.compile(
br"^(?P<mext>m=[^\000=]+,)?"
br"r=(?P<nonce>[\x21-\x2B\x2D-\x7E]+),"
br"s=(?P<salt>[a-zA-Z0-9/+=]+),"
br"i=(?P<iteration_count>\d+)"
br"(?:,.*)?$"
)
CLIENT_FINAL_MESSAGE_RE = re.compile(
br"(?P<without_proof>c=(?P<cb>[a-zA-Z0-9/+=]+),"
br"(?:r=(?P<nonce>[\x21-\x2B\x2D-\x7E]+))"
br"(?:,.*)?)"
br",p=(?P<proof>[a-zA-Z0-9/+=]+)$"
)
SERVER_FINAL_MESSAGE_RE = re.compile(
br"^(?:e=(?P<error>[^,]+)|v=(?P<verifier>[a-zA-Z0-9/+=]+)(?:,.*)?)$")
class SCRAMOperations(object):
"""Functions used during SCRAM authentication and defined in the RFC.
"""
def __init__(self, hash_function_name):
self.hash_function_name = hash_function_name
self.hash_factory = HASH_FACTORIES[hash_function_name]
self.digest_size = self.hash_factory().digest_size
@staticmethod
def Normalize(str_):
"""The Normalize(str) function.
This one also accepts Unicode string input (in the RFC only UTF-8
strings are used).
"""
# pylint: disable=C0103
if isinstance(str_, bytes):
str_ = str_.decode("utf-8")
return str_.encode("utf-8")
def HMAC(self, key, str_):
"""The HMAC(key, str) function."""
# pylint: disable=C0103
return hmac.new(key, str_, self.hash_factory).digest()
def H(self, str_):
"""The H(str) function."""
# pylint: disable=C0103
return self.hash_factory(str_).digest()
if sys.version_info.major >= 3:
@staticmethod
# pylint: disable=C0103
def XOR(str1, str2):
"""The XOR operator for two byte strings."""
return bytes(a ^ b for a, b in zip(str1, str2))
else:
@staticmethod
# pylint: disable=C0103
def XOR(str1, str2):
"""The XOR operator for two byte strings."""
return "".join(chr(ord(a) ^ ord(b)) for a, b in zip(str1, str2))
def Hi(self, str_, salt, i):
"""The Hi(str, salt, i) function."""
# pylint: disable=C0103
Uj = self.HMAC(str_, salt + b"\000\000\000\001") # U1
result = Uj
for _ in range(2, i + 1):
Uj = self.HMAC(str_, Uj) # Uj = HMAC(str, Uj-1)
result = self.XOR(result, Uj) # ... XOR Uj-1 XOR Uj
return result
@staticmethod
def escape(data):
"""Escape the ',' and '=' characters for 'a=' and 'n=' attributes.
Replaces '=' with '=3D' and ',' with '=2C'.
:Parameters:
- `data`: string to escape
:Types:
- `data`: `bytes`
"""
return data.replace(b'=', b'=3D').replace(b',', b'=2C')
@staticmethod
def unescape(data):
"""Unescape the ',' and '=' characters for 'a=' and 'n=' attributes.
Reverse of `escape`.
:Parameters:
- `data`: string to unescape
:Types:
- `data`: `bytes`
"""
return data.replace(b'=2C', b',').replace(b'=3D', b'=')
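# Illustrative sketch (not part of the original module): how the RFC 5802 key
# material falls out of the helpers above. The password, salt and iteration
# count are made-up example values.
def _scram_key_derivation_sketch():
    ops = SCRAMOperations("SHA-1")
    salted_password = ops.Hi(ops.Normalize("pencil"), b"example-salt", 4096)
    client_key = ops.HMAC(salted_password, b"Client Key")
    stored_key = ops.H(client_key)
    server_key = ops.HMAC(salted_password, b"Server Key")
    # ClientProof = ClientKey XOR HMAC(StoredKey, AuthMessage); the AuthMessage
    # itself is assembled by the authenticator classes defined below.
    return stored_key, server_key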
class SCRAMClientAuthenticator(SCRAMOperations):
"""Provides SCRAM SASL authentication for a client.
:Ivariables:
- `username`: current authentication username
- `password`: current authentication password
- `authzid`: requested authorization id
"""
# pylint: disable-msg=R0902
def __init__(self, hash_name, channel_binding):
"""Initialize a `SCRAMClientAuthenticator` object.
:Parameters:
- `hash_name`: hash function name, e.g. ``"SHA-1"``
- `channel_binding`: `True` to enable channel binding
:Types:
- `hash_name`: `unicode`
- `channel_binding`: `bool`
"""
SCRAMOperations.__init__(self, hash_name)
self.name = "SCRAM-{0}".format(hash_name)
if channel_binding:
self.name += "-PLUS"
self.channel_binding = channel_binding
self.username = None
self.password = None
self.authzid = None
self._c_nonce = None
self._server_first_message = False
self._client_first_message_bare = False
self._gs2_header = None
self._finished = False
self._auth_message = None
self._salted_password = None
self._cb_data = None
@classmethod
def are_properties_sufficient(cls, properties):
return "username" in properties and "password" in properties
def start(self, properties):
self.username = properties["username"]
self.password = properties["password"]
self.authzid = properties.get("authzid", "")
c_nonce = properties.get("nonce_factory", default_nonce_factory)()
if not VALUE_CHARS_RE.match(c_nonce):
c_nonce = standard_b64encode(c_nonce)
self._c_nonce = c_nonce
if self.channel_binding:
cb_data = properties.get("channel-binding")
if not cb_data:
raise ValueError("No channel binding data provided")
if "tls-unique" in cb_data:
cb_type = "tls-unique"
elif "tls-server-end-point" in cb_data:
cb_type = "tls-server-end-point"
elif cb_data:
cb_type = next(iter(cb_data))  # works with both Python 2 lists and Python 3 dict views
self._cb_data = cb_data[cb_type]
cb_flag = b"p=" + cb_type.encode("utf-8")
else:
plus_name = self.name + "-PLUS"
if plus_name in properties.get("enabled_mechanisms", []):
# -PLUS is enabled (supported) on our side,
# but was not selected - that means it was not included
# in the server features
cb_flag = b"y"
else:
cb_flag = b"n"
if self.authzid:
authzid = b"a=" + self.escape(self.authzid.encode("utf-8"))
else:
authzid = b""
gs2_header = cb_flag + b"," + authzid + b","
self._gs2_header = gs2_header
nonce = b"r=" + c_nonce
client_first_message_bare = (b"n=" +
self.escape(self.username.encode("utf-8")) + b"," + nonce)
self._client_first_message_bare = client_first_message_bare
client_first_message = gs2_header + client_first_message_bare
return client_first_message
def challenge(self, challenge):
"""Process a challenge and return the response.
:Parameters:
- `challenge`: the challenge from server.
:Types:
- `challenge`: `bytes`
:return: the response
:returntype: bytes
:raises: `BadChallengeException`
"""
# pylint: disable=R0911
if not challenge:
raise BadChallengeException('Empty challenge')
if self._server_first_message:
return self._final_challenge(challenge)
match = SERVER_FIRST_MESSAGE_RE.match(challenge)
if not match:
raise BadChallengeException("Bad challenge syntax: {0!r}".format(challenge))
self._server_first_message = challenge
mext = match.group("mext")
if mext:
raise BadChallengeException("Unsupported extension received: {0!r}".format(mext))
nonce = match.group("nonce")
if not nonce.startswith(self._c_nonce):
raise BadChallengeException("Nonce does not start with our nonce")
salt = match.group("salt")
try:
salt = a2b_base64(salt)
except ValueError:
raise BadChallengeException("Bad base64 encoding for salt: {0!r}".format(salt))
iteration_count = match.group("iteration_count")
try:
iteration_count = int(iteration_count)
except ValueError:
raise BadChallengeException("Bad iteration_count: {0!r}".format(iteration_count))
return self._make_response(nonce, salt, iteration_count)
def _make_response(self, nonce, salt, iteration_count):
"""Make a response for the first challenge from the server.
:return: the response
:returntype: bytes
"""
self._salted_password = self.Hi(self.Normalize(self.password), salt,
iteration_count)
self.password = None # not needed any more
if self.channel_binding:
channel_binding = b"c=" + standard_b64encode(self._gs2_header +
self._cb_data)
else:
channel_binding = b"c=" + standard_b64encode(self._gs2_header)
# pylint: disable=C0103
client_final_message_without_proof = (channel_binding + b",r=" + nonce)
client_key = self.HMAC(self._salted_password, b"Client Key")
stored_key = self.H(client_key)
auth_message = ( self._client_first_message_bare + b"," +
self._server_first_message + b"," +
client_final_message_without_proof )
self._auth_message = auth_message
client_signature = self.HMAC(stored_key, auth_message)
client_proof = self.XOR(client_key, client_signature)
proof = b"p=" + standard_b64encode(client_proof)
client_final_message = (client_final_message_without_proof + b"," +
proof)
return client_final_message
def _final_challenge(self, challenge):
"""Process the second challenge from the server and return the
response.
:Parameters:
- `challenge`: the challenge from server.
:Types:
- `challenge`: `bytes`
:raises: `ExtraChallengeException`, `BadChallengeException`, `ServerScramError`, or `BadSuccessException`
"""
if self._finished:
raise ExtraChallengeException()
match = SERVER_FINAL_MESSAGE_RE.match(challenge)
if not match:
raise BadChallengeException("Bad final message syntax: {0!r}".format(challenge))
error = match.group("error")
if error:
raise ServerScramError("{0!r}".format(error))
verifier = match.group("verifier")
if not verifier:
raise BadSuccessException("No verifier value in the final message")
server_key = self.HMAC(self._salted_password, b"Server Key")
server_signature = self.HMAC(server_key, self._auth_message)
if server_signature != a2b_base64(verifier):
raise BadSuccessException("Server verifier does not match")
self._finished = True
def finish(self, data):
"""Process success indicator from the server.
Process any additional data passed with the success.
Fail if the server was not authenticated.
:Parameters:
- `data`: an optional additional data with success.
:Types:
- `data`: `bytes`
:return: username and authzid
:returntype: `dict`
:raises: `BadSuccessException`"""
if not self._server_first_message:
raise BadSuccessException("Got success too early")
if self._finished:
return {"username": self.username, "authzid": self.authzid}
else:
self._final_challenge(data)
if self._finished:
return {"username": self.username,
"authzid": self.authzid}
else:
raise BadSuccessException("Something went wrong when processing additional"
" data with success?")
class SCRAMServerAuthenticator(SCRAMOperations):
"""Provides SCRAM SASL authentication for a server.
"""
def __init__(self, hash_name, channel_binding, password_database):
"""Initialize a `SCRAMClientAuthenticator` object.
:Parameters:
- `hash_function_name`: hash function name, e.g. ``"SHA-1"``
- `channel_binding`: `True` to enable channel binding
:Types:
- `hash_function_name`: `unicode`
- `channel_binding`: `bool`
"""
SCRAMOperations.__init__(self, hash_name)
self.name = "SCRAM-{0}".format(hash_name)
if channel_binding:
self.name += "-PLUS"
self.channel_binding = channel_binding
self.properties = None
self.out_properties = None
self.password_database = password_database
self._client_first_message_bare = None
self._stored_key = None
self._server_key = None
def start(self, properties, initial_response):
self.properties = properties
self._client_first_message_bare = None
self.out_properties = {}
if not initial_response:
return b""
return self.response(initial_response)
def response(self, response):
if self._client_first_message_bare:
logger.debug("Client final message: {0!r}".format(response))
return self._handle_final_response(response)
else:
logger.debug("Client first message: {0!r}".format(response))
return self._handle_first_response(response)
def _handle_first_response(self, response):
match = CLIENT_FIRST_MESSAGE_RE.match(response)
if not match:
raise NotAuthorizedException("Bad response syntax: {0!r}".format(response))
mext = match.group("mext")
if mext:
raise NotAuthorizedException("Unsupported extension received: {0!r}".format(mext))
gs2_header = match.group("gs2_header")
cb_name = match.group("cb_name")
if self.channel_binding:
if not cb_name:
raise NotAuthorizedException("{0!r} used with no channel-binding"
.format(self.name))
cb_name = cb_name.decode("utf-8")
if cb_name not in self.properties["channel-binding"]:
raise NotAuthorizedException("Channel binding data type {0!r} not available"
.format(cb_name))
else:
if gs2_header.startswith(b'y'):
plus_name = self.name + "-PLUS"
if plus_name in self.properties.get("enabled_mechanisms", []):
raise NotAuthorizedException("Channel binding downgrade attack detected")
elif gs2_header.startswith(b'p'):
# is this really an error?
raise NotAuthorizedException("Channel binding requested for {0!r}"
.format(self.name))
authzid = match.group("authzid")
if authzid:
self.out_properties['authzid'] = self.unescape(authzid
).decode("utf-8")
else:
self.out_properties['authzid'] = None
username = self.unescape(match.group("username")).decode("utf-8")
self.out_properties['username'] = username
nonce_factory = self.properties.get("nonce_factory",
default_nonce_factory)
properties = dict(self.properties)
properties.update(self.out_properties)
s_pformat = "SCRAM-{0}-SaltedPassword".format(self.hash_function_name)
k_pformat = "SCRAM-{0}-Keys".format(self.hash_function_name)
password, pformat = self.password_database.get_password(username,
(s_pformat, "plain"), properties)
if pformat == s_pformat:
if password is not None:
salt, iteration_count, salted_password = password
else:
logger.debug("No password for user {0!r}".format(username))
elif pformat != k_pformat:
salt = self.properties.get("SCRAM-salt")
if not salt:
salt = nonce_factory()
iteration_count = self.properties.get("SCRAM-iteration-count", 4096)
if pformat == "plain" and password is not None:
salted_password = self.Hi(self.Normalize(password), salt,
iteration_count)
else:
logger.debug("No password for user {0!r}".format(username))
password = None
# to prevent timing attack, compute the key anyway
salted_password = self.Hi(self.Normalize(""), salt,
iteration_count)
if pformat == k_pformat:
salt, iteration_count, stored_key, server_key = password
else:
client_key = self.HMAC(salted_password, b"Client Key")
stored_key = self.H(client_key)
server_key = self.HMAC(salted_password, b"Server Key")
if password is not None:
self._stored_key = stored_key
self._server_key = server_key
else:
self._stored_key = None
self._server_key = None
c_nonce = match.group("nonce")
s_nonce = nonce_factory()
if not VALUE_CHARS_RE.match(s_nonce):
s_nonce = standard_b64encode(s_nonce)
nonce = c_nonce + s_nonce
server_first_message = (
b"r=" + nonce
+ b",s=" + standard_b64encode(salt)
+ b",i=" + str(iteration_count).encode("utf-8")
)
self._nonce = nonce
self._cb_name = cb_name
self._gs2_header = gs2_header
self._client_first_message_bare = match.group("client_first_bare")
self._server_first_message = server_first_message
return server_first_message
def _handle_final_response(self, response):
match = CLIENT_FINAL_MESSAGE_RE.match(response)
if not match:
raise NotAuthorizedException("Bad response syntax: {0!r}".format(response))
if match.group("nonce") != self._nonce:
raise NotAuthorizedException("Bad nonce in the final client response")
cb_input = a2b_base64(match.group("cb"))
if not cb_input.startswith(self._gs2_header):
raise NotAuthorizedException("GS2 header in the final response ({0!r}) doesn't"
" match the one sent in the first message ({1!r})"
.format(cb_input, self._gs2_header))
if self._cb_name:
cb_data = cb_input[len(self._gs2_header):]
if cb_data != self.properties["channel-binding"][self._cb_name]:
raise NotAuthorizedException("Channel binding data doesn't match")
proof = a2b_base64(match.group("proof"))
auth_message = (self._client_first_message_bare + b"," +
self._server_first_message + b"," +
match.group("without_proof"))
if self._stored_key is None:
# compute something to prevent timing attack
client_signature = self.HMAC(b"", auth_message)
client_key = self.XOR(client_signature, proof)
self.H(client_key)
raise NotAuthorizedException("Authentication failed (bad username)")
client_signature = self.HMAC(self._stored_key, auth_message)
client_key = self.XOR(client_signature, proof)
if self.H(client_key) != self._stored_key:
raise NotAuthorizedException("Authentication failed")
server_signature = self.HMAC(self._server_key, auth_message)
server_final_message = b"v=" + standard_b64encode(server_signature)
return (self.out_properties, server_final_message)
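# Illustrative sketch (not part of the original module): the minimal password
# database interface that SCRAMServerAuthenticator above relies on. Only the
# "plain" format is served here; a real backend could instead return the
# "SCRAM-<hash>-SaltedPassword" or "SCRAM-<hash>-Keys" formats requested first,
# so that plaintext passwords never have to be stored.
class _PlainPasswordDatabaseSketch(object):
    def __init__(self, accounts):
        self.accounts = accounts  # e.g. {"user": "pencil"}
    def get_password(self, username, acceptable_formats, properties):
        if "plain" in acceptable_formats and username in self.accounts:
            return self.accounts[username], "plain"
        return None, None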
|
|
import functools, operator, os, types, sys, itertools
import datetime, cgi, time
import six, mimetypes
WIN32 = True if sys.platform == 'win32' else False
try:
# Python3
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class config:
root = [
# ('/', ('./index.html', 'text/html')),
# ('/', ('./', 'text/html')),
# ('/sample.dir', ('./sample.dir', 'application/x-shockwave-flash')),
# ('/test.dir', ('./test.dir', 'application/x-shockwave-flash')),
('/', ('goatse.cx.jpg', 'image/jpeg')),
]
def getfiledata(filename,mime):
def l(q):
f = open(filename, 'rb')
res = f.read()
f.close()
return res, mime
return l
def listdirectory(baseuri, path):
assert baseuri.endswith('/')
def row(*args):
return '<tr>%s</tr>'%( ''.join(('<td>%s</td>'%x for x in args)) )
def l(q):
result = ['<table>']
result.append( row(*['<i>%s</i>'%x for x in ('path','size','created','modified')]) )
result.append( row( '<a href="%s../">..</a>'% baseuri) )
for name in find(path, root=path, depth=0):
contents = name.replace(os.sep, '/')
uri = '%s%s'% (baseuri, contents)
_,_,_,_,_,_,sz,at,mt,ct = os.stat('%s%s'%(path,name))
result.append( row('<a href="%s">%s</a>'%(uri,contents), str(sz), time.ctime(ct),time.ctime(mt)) )
return ''.join(result).encode('latin1'),'text/html'
return l
def find(path='.'+os.sep, depth=None, root='.'+os.sep):
if isinstance(depth, six.integer_types):
if depth < 0:
return
depth -= 1
for name in os.listdir(path):
fullpath = os.path.relpath(os.path.join(path, name), start=root)
if os.path.isdir(root+fullpath):
yield fullpath + os.sep
for x in find(root+fullpath, depth, root=root):
yield x
continue
yield fullpath
return
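# Example of what find() yields (illustrative): for a tree containing
# ./docs/readme.txt and ./index.html, find('./', root='./') produces
# 'docs' + os.sep, then 'docs/readme.txt' (relative to root), then 'index.html'.
# Directories carry a trailing separator, which add_directory() below keys on,
# and depth=0 restricts the walk to the top level only.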
class fileserver(BaseHTTPRequestHandler):
filesystem = None # dict
root = config.root
def __init__(self, *kwds, **args):
self.init_filesystem()
BaseHTTPRequestHandler.__init__(self, *kwds, **args)
def init_filesystem(self):
self.filesystem = {}
for path,loc in self.root:
if not isinstance(loc, tuple):
self.filesystem[path] = loc
continue
if path.endswith('/') and loc[0][-1] in (os.sep,'/'):
directory,options = loc
self.add_directory(path, directory)
continue
filename,mime = loc
self.add_file(path, filename, mime)
return
def add_file(self, key, path, mime):
if mime is None:
(mime,encoding) = mimetypes.guess_type(path)
self.filesystem[key] = getfiledata(path, mime)
def add_directory(self, key, path):
assert key.endswith('/')
directory = path.replace('/', os.sep)
assert directory.endswith(os.sep)
for x in find(directory, root=directory, depth=0):
uri = key + x.replace(os.sep, '/')
fullpath = path+x
if x.endswith(os.sep):
self.add_directory(uri, fullpath)
continue
self.filesystem[uri] = getfiledata(fullpath, mimetypes.guess_type(x)[0])  # guess_type returns (mime, encoding)
self.filesystem[key] = listdirectory(key, directory)
def send_contents(self, data, mime):
self.send_response(200)
self.send_header('Content-Type', mime)
self.send_header('Content-Length', len(data))
self.end_headers()
self.wfile.write(data)
def do_GET(self):
path,query=self.path,''
if '?' in self.path:
path,query = self.path.split('?', 1)
try:
data = self.filesystem[path]
except KeyError:
self.send_error(404, 'File not found: %s'% self.path)
return
contents,mime = data(query)
self.send_contents(contents, mime)
def parse_args():
import argparse
res = argparse.ArgumentParser(description='Serve up some explicit files over the HTTP protocol and its derivatives')
res.add_argument('-ssl', dest='use_ssl', action='store_true', default=False, help='Use SSL when binding to requested port.')
ssl = res.add_argument_group('available ssl options', description='Options available when binding to a port with SSL.')
ssl.add_argument('-key', dest='keyfile', metavar='FILE', action='store', help='Use the specified key when serving SSL (generate one if none specified)')
ssl.add_argument('-keysize', dest='keysize', metavar='BITS', type=int, default=1024, action='store', help='Use the specified number of bits when generating the key')
ssl.add_argument('-keyout', dest='keypath', metavar='PATH', action='store', help='Write the key that is used to the path that is specified')
ssl.add_argument('-cert', dest='certificate', metavar='FILE', action='store', help='Use the specified x509 certificate when serving SSL (self-sign one using key if none specified)')
ssl.add_argument('-param', dest='parameters', metavar=('ATTRIBUTE', 'VALUE'), action='append', nargs=2, help='Use the specified parameters (attribute name = attribute value) when generating the x509 certificate')
ssl.add_argument('-certout', dest='certificatepath', metavar='PATH', action='store', help='Write the certificate that is used to the path that is specified')
res.add_argument(dest='hostport', metavar='host:port', action='store')
return res.parse_args()
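# Typical invocations (illustrative; "poc_fileserver.py" stands for whatever
# name this file is saved under):
#   python poc_fileserver.py 127.0.0.1:8080
#   python poc_fileserver.py -ssl -keyout key.pem -certout cert.pem 0.0.0.0:8443
#   python poc_fileserver.py -ssl -key key.pem -cert cert.pem localhost:8443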
def gen_key(e=65537, bits=1024):
import cryptography.hazmat.primitives.asymmetric.rsa as chpar
import cryptography.hazmat.primitives.serialization as chps
import cryptography.hazmat.backends as chb
print("generating an RSA key of {:d}-bit{:s} using e={:d}.".format(bits, '' if bits == 1 else 's', e))
key = chpar.generate_private_key(public_exponent=e, key_size=bits, backend=chb.default_backend())
pem = key.private_bytes(encoding=chps.Encoding.PEM, format=chps.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=chps.NoEncryption())
return key, pem
def load_key(content, password=None):
import cryptography.hazmat.primitives.serialization as chps
import cryptography.hazmat.backends as chb
print("loading RSA key from {:d} bytes worth of file.".format(len(content)))
try:
key = chps.load_pem_private_key(data=content, password=password, backend=chb.default_backend())
except ValueError:
print('critical: error while decoding key, generating a temporary one instead.\n')
return gen_key()
except TypeError:
pass
else:
pem = key.private_bytes(encoding=chps.Encoding.PEM, format=chps.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=chps.NoEncryption())
return key, pem
try:
password = input("key is encrypted, please type in your password (ctrl+c to give up): ")
except KeyboardInterrupt:
print("warning: user aborted key decryption, generating a temporary one instead.\n")
return gen_key()
return load_key(content, password)
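# Note on load_key(): an unreadable PEM falls back to generating a throwaway
# key, a TypeError (encrypted key, no password) triggers an interactive prompt
# and a retry, and in every case the caller gets back a (key, pem_bytes) pair.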
def gen_certificate(private, **params):
from datetime import datetime as delirium_tremens
now = delirium_tremens.utcnow()
params.setdefault('serial_number', 1024)
params.setdefault('not_valid_before', now)
params.setdefault('not_valid_after', params['not_valid_before'] + datetime.timedelta(days=42))
params.setdefault('hashAlgorithm', 'sha256')
import cryptography.x509 as X
import ipaddress as inet
hostname = os.environ.get('HOSTNAME', 'localhost')
cn = X.Name([X.NameAttribute(X.oid.NameOID.COMMON_NAME, hostname)])
params['subject_name'] = cn
params.setdefault('issuer_name', cn)
global host, port
address = inet.ip_address(host)
alts = [token(item) for token, item in zip([X.DNSName, X.IPAddress], [host, address])]
an = X.SubjectAlternativeName(alts)
bc = X.BasicConstraints(ca=True, path_length=0)
import cryptography.hazmat.primitives as chp
namespace = map(functools.partial(getattr, chp.hashes), dir(chp.hashes))
algorithm_types = (item for item in namespace if isinstance(item, type))
algorithms = {cons.name : cons for cons in algorithm_types if issubclass(cons, chp.hashes.HashAlgorithm) and cons is not chp.hashes.HashAlgorithm}
suggestion = params.pop('hashAlgorithm')
if operator.contains(algorithms, suggestion):
hashAlgorithm = algorithms[suggestion]
else:
print("critical: suggested hash algorithm ({:s}) was not found in the available algoritms ({:s}).".format(suggestion, ', '.join(sorted(algorithms))))
hashAlgorithm = algorithms[next(name for name in itertools.chain(['sha1', 'md5'], algorithms) if operator.contains(algorithms, name))]
print("warning: ended up falling back to an alternative one ({:s}).\n".format(hashAlgorithm.name))
import cryptography.hazmat.backends as chb
import cryptography.hazmat.primitives as chp
params['issuer_name'] = X.Name([X.NameAttribute(X.oid.NameOID.COMMON_NAME, params['issuer_name'])]) if isinstance(params['issuer_name'], six.string_types) else params['issuer_name']
print("generating {:s}certificate issued by {:s} for {:s} ({:s}).".format('self-signed ' if params['issuer_name'] == params['subject_name'] else '', params['issuer_name'].rfc4514_string(), params['subject_name'].rfc4514_string(), ', '.join(map("{!s}".format, an))))
try:
x509 = functools.reduce(lambda agg, attribute_value: (lambda attribute, value: getattr(agg, attribute)(int(value) if isinstance(value, six.string_types) and value.isdigit() else value))(*attribute_value), params.items(), X.CertificateBuilder())
except AttributeError:
available = {attribute for attribute in dir(X.CertificateBuilder) if not attribute.startswith('_')} | {'hashAlgorithm'}
misses = {choice for choice in params} - available
print("critical: unable to generate certificate due to the explicitly given parameters ({:s}) not being within the ones available ({:s}).".format(', '.join(misses), ', '.join(available)))
print('trying again without the invalid parameters.\n')
[ params.pop(attribute) for attribute in misses ]
params['hashAlgorithm'] = hashAlgorithm.name
return gen_certificate(private, **params)
else:
print('adding necessary extensions to certificate and signing it.')
extended = x509.add_extension(bc, False).add_extension(an, False).public_key(private.public_key())
try:
certificate = extended.sign(private_key=private, algorithm=hashAlgorithm(), backend=chb.default_backend())
except (ValueError, TypeError):
print("critical: error signing certificate likely due to the hashAlgorithm ({:s}) not being viable.".format(hashAlgorithm.name))
print('trying again using a default algorithm.\n')
return gen_certificate(private, **params)
assert isinstance(certificate, X.Certificate)
return certificate, certificate.public_key()
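# Note on gen_certificate(): parameters that CertificateBuilder does not know
# raise AttributeError, get reported and stripped, and the call is retried;
# a signing failure (ValueError/TypeError) likewise retries with the default
# hash algorithm. Either way the function returns a (certificate, public_key) pair.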
def load_certificate(private, content):
import cryptography.x509 as X
import cryptography.hazmat.primitives.asymmetric.padding as chpap
import cryptography.hazmat.primitives.serialization as chps
print("reading an X509 certificate from {:d} bytes worth of PEM.".format(len(content)))
try:
certificate = X.load_pem_x509_certificate(data=content)
except ValueError:
print("critical: error while decoding certificate, generating one instead.\n")
return gen_certificate(private)
import cryptography
print('verifying the private key matches the following public key from the certificate.\n')
print(certificate.public_key().public_bytes(encoding=chps.Encoding.PEM, format=chps.PublicFormat.SubjectPublicKeyInfo).decode(sys.getdefaultencoding()))
try:
private.public_key().verify(signature=certificate.signature, data=certificate.tbs_certificate_bytes, padding=chpap.PKCS1v15(), algorithm=certificate.signature_hash_algorithm)
except cryptography.exceptions.InvalidSignature:
print("critical: the certificate's public key does not match the private key, generating a new certificate instead.\n")
return gen_certificate(private)
else:
print('which definitely seems to be the case.')
return certificate, certificate.public_key()
def hardlink(src, dst):
if os.path.isfile(dst):
print("warning: removing file at the specified target path ({:s}).".format(dst))
os.unlink(dst)
elif os.path.exists(dst):
print("critical: refusing to overwrite target path ({:s}) due to target not being a file.".format(dst))
return
return os.link(src, dst)
def setup_ssl(socket, arguments):
try:
import cryptography, ssl
import cryptography.hazmat.primitives.serialization as chps
except ImportError:
print('warning: ignoring request for SSL support due to an error importing the necessary libraries.')
return socket
python_ssl_is_fucking_stupid = {}
if arguments.keyfile:
with open(arguments.keyfile, 'rb') as infile:
content = infile.read()
key, pem = load_key(content)
else:
key, pem = gen_key(bits=arguments.keysize)
print("using the following {:d}-bit key.\n".format(key.key_size))
print(pem.decode(sys.getdefaultencoding()))
python_ssl_is_fucking_stupid['keydata'] = pem
parameters = {attribute : value for attribute, value in arguments.parameters or []}
if arguments.certificate:
if parameters:
print('warning: ignoring the provided certificate parameters due to being asked to load certificate from file.\n')
with open(arguments.certificate, 'rb') as infile:
content = infile.read()
cert, pk = load_certificate(key, content)
else:
cert, pk = gen_certificate(key, **parameters)
sig = bytearray(cert.fingerprint(algorithm=cert.signature_hash_algorithm))
pem = cert.public_bytes(encoding=chps.Encoding.PEM)
print("\nusing certificate with a {:d}-bit {:s} ({:s})\n{:s}\n".format(8 * len(sig), cert.signature_hash_algorithm.name, cert.signature_algorithm_oid.dotted_string, ':'.join(map("{:02x}".format, sig))))
print(pem.decode(sys.getdefaultencoding()))
python_ssl_is_fucking_stupid['certdata'] = pem
import tempfile
with tempfile.NamedTemporaryFile(prefix='poc', delete=not WIN32) as keyfile, tempfile.NamedTemporaryFile(prefix='poc', delete=not WIN32) as certfile:
keyfile.write(python_ssl_is_fucking_stupid['keydata'])
if arguments.keypath:
hardlink(keyfile.name, arguments.keypath)
print("wrote key data to {:s}.".format(arguments.keypath))
certfile.write(python_ssl_is_fucking_stupid['certdata'])
if arguments.certificatepath:
hardlink(certfile.name, arguments.certificatepath)
print("wrote certificate data to {:s}.".format(arguments.certificatepath))
if WIN32: [ file.close() for file in [keyfile, certfile] ]
wrap_the_bitch_using_filenames_because_python_is_fucking_stupid = ssl.wrap_socket(socket, server_side=True, keyfile=keyfile.name, certfile=certfile.name)
if WIN32: keyfile, certfile = (open(file.name) for file in [keyfile, certfile])
return wrap_the_bitch_using_filenames_because_python_is_fucking_stupid
if __name__ == '__main__':
arguments = parse_args()
host, port = arguments.hostport.split(':')
port = int(port)
httpd = HTTPServer((host, port), fileserver)
if arguments.use_ssl:
print('setting up ssl on socket as per user request.\n')
httpd.socket = setup_ssl(httpd.socket, arguments)
print('bound to %s:%d'% (host, port))
print('...and we\'re off.')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
sys.exit(0)
|
|
import re
from django.template import Node, Variable, VariableNode
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.template.base import _render_value_in_context
from django.utils import translation
from django.utils.encoding import force_unicode
from django.template.defaulttags import token_kwargs
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = Variable(lang_code)
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = Variable(languages)
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop):
self.noop = noop
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
output = self.filter_expression.resolve(context)
return _render_value_in_context(output, context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
result = translation.ugettext(singular)
# Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
data = dict([(v, _render_value_in_context(context.get(v, ''), context)) for v in vars])
context.pop()
return result % data
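# Example of the round trip performed above (illustrative):
#   {% blocktrans %}This is {{ bar }}.{% endblocktrans %}
# is flattened by render_token_list() to the msgid u'This is %(bar)s.', run
# through ugettext()/ungettext(), and the resolved context values are then
# interpolated back with '%' formatting -- hence the '%%' escaping of any
# stray '%' characters in the translated string.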
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by make-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
# Backwards Compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
pos = None
m = re.match("^'([^']+)'(\|.*$)",value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'),m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
if self.more():
if self.tag() == 'noop':
noop = True
else:
raise TemplateSyntaxError("only option for 'trans' is 'noop'")
else:
noop = False
return (value, noop)
value, noop = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = options['count'].items()[0]
else:
countervar, counter = None, None
extra_context = options.get('with', {})
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter)
register.tag('get_available_languages', do_get_available_languages)
register.tag('get_language_info', do_get_language_info)
register.tag('get_language_info_list', do_get_language_info_list)
register.tag('get_current_language', do_get_current_language)
register.tag('get_current_language_bidi', do_get_current_language_bidi)
register.tag('trans', do_translate)
register.tag('blocktrans', do_block_translate)
register.filter(language_name)
register.filter(language_name_local)
register.filter(language_bidi)
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class ipsecprofile(base_resource) :
""" Configuration for IPSEC profile resource. """
def __init__(self) :
self._name = ""
self._ikeversion = ""
self._encalgo = []
self._hashalgo = []
self._lifetime = 0
self._psk = ""
self._publickey = ""
self._privatekey = ""
self._peerpublickey = ""
self._livenesscheckinterval = 0
self._replaywindowsize = 0
self._ikeretryinterval = 0
self._retransmissiontime = 0
self._builtin = []
self.___count = 0
@property
def name(self) :
ur"""The name of the ipsec profile.<br/>Minimum length = 1<br/>Maximum length = 32.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""The name of the ipsec profile.<br/>Minimum length = 1<br/>Maximum length = 32
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ikeversion(self) :
ur"""IKE Protocol Version.<br/>Possible values = V1, V2.
"""
try :
return self._ikeversion
except Exception as e:
raise e
@ikeversion.setter
def ikeversion(self, ikeversion) :
ur"""IKE Protocol Version.<br/>Possible values = V1, V2
"""
try :
self._ikeversion = ikeversion
except Exception as e:
raise e
@property
def encalgo(self) :
ur"""Type of encryption algorithm.<br/>Possible values = AES, 3DES.
"""
try :
return self._encalgo
except Exception as e:
raise e
@encalgo.setter
def encalgo(self, encalgo) :
ur"""Type of encryption algorithm.<br/>Possible values = AES, 3DES
"""
try :
self._encalgo = encalgo
except Exception as e:
raise e
@property
def hashalgo(self) :
ur"""Type of hashing algorithm.<br/>Possible values = HMAC_SHA1, HMAC_SHA256, HMAC_SHA384, HMAC_SHA512, HMAC_MD5.
"""
try :
return self._hashalgo
except Exception as e:
raise e
@hashalgo.setter
def hashalgo(self, hashalgo) :
ur"""Type of hashing algorithm.<br/>Possible values = HMAC_SHA1, HMAC_SHA256, HMAC_SHA384, HMAC_SHA512, HMAC_MD5
"""
try :
self._hashalgo = hashalgo
except Exception as e:
raise e
@property
def lifetime(self) :
ur"""Lifetime of IKE SA in seconds. Lifetime of IPSec SA will be (lifetime of IKE SA/8).<br/>Minimum length = 480<br/>Maximum length = 31536000.
"""
try :
return self._lifetime
except Exception as e:
raise e
@lifetime.setter
def lifetime(self, lifetime) :
ur"""Lifetime of IKE SA in seconds. Lifetime of IPSec SA will be (lifetime of IKE SA/8).<br/>Minimum length = 480<br/>Maximum length = 31536000
"""
try :
self._lifetime = lifetime
except Exception as e:
raise e
@property
def psk(self) :
ur"""Pre shared key value.
"""
try :
return self._psk
except Exception as e:
raise e
@psk.setter
def psk(self, psk) :
ur"""Pre shared key value.
"""
try :
self._psk = psk
except Exception as e:
raise e
@property
def publickey(self) :
ur"""Public key file path.
"""
try :
return self._publickey
except Exception as e:
raise e
@publickey.setter
def publickey(self, publickey) :
ur"""Public key file path.
"""
try :
self._publickey = publickey
except Exception as e:
raise e
@property
def privatekey(self) :
ur"""Private key file path.
"""
try :
return self._privatekey
except Exception as e:
raise e
@privatekey.setter
def privatekey(self, privatekey) :
ur"""Private key file path.
"""
try :
self._privatekey = privatekey
except Exception as e:
raise e
@property
def peerpublickey(self) :
ur"""Peer public key file path.
"""
try :
return self._peerpublickey
except Exception as e:
raise e
@peerpublickey.setter
def peerpublickey(self, peerpublickey) :
ur"""Peer public key file path.
"""
try :
self._peerpublickey = peerpublickey
except Exception as e:
raise e
@property
def livenesscheckinterval(self) :
ur"""Number of seconds after which a notify payload is sent to check the liveliness of the peer. Additional retries are done as per retransmit interval setting. Zero value disables liveliness checks.<br/>Maximum length = 64999.
"""
try :
return self._livenesscheckinterval
except Exception as e:
raise e
@livenesscheckinterval.setter
def livenesscheckinterval(self, livenesscheckinterval) :
ur"""Number of seconds after which a notify payload is sent to check the liveliness of the peer. Additional retries are done as per retransmit interval setting. Zero value disables liveliness checks.<br/>Maximum length = 64999
"""
try :
self._livenesscheckinterval = livenesscheckinterval
except Exception as e:
raise e
@property
def replaywindowsize(self) :
ur"""IPSec Replay window size for the data traffic.<br/>Maximum length = 16384.
"""
try :
return self._replaywindowsize
except Exception as e:
raise e
@replaywindowsize.setter
def replaywindowsize(self, replaywindowsize) :
ur"""IPSec Replay window size for the data traffic.<br/>Maximum length = 16384
"""
try :
self._replaywindowsize = replaywindowsize
except Exception as e:
raise e
@property
def ikeretryinterval(self) :
ur"""IKE retry interval for bringing up the connection.<br/>Minimum length = 60<br/>Maximum length = 3600.
"""
try :
return self._ikeretryinterval
except Exception as e:
raise e
@ikeretryinterval.setter
def ikeretryinterval(self, ikeretryinterval) :
ur"""IKE retry interval for bringing up the connection.<br/>Minimum length = 60<br/>Maximum length = 3600
"""
try :
self._ikeretryinterval = ikeretryinterval
except Exception as e:
raise e
@property
def retransmissiontime(self) :
ur"""The interval in seconds to retry sending the IKE messages to peer, three consecutive attempts are done with doubled interval after every failure.<br/>Minimum length = 1<br/>Maximum length = 99.
"""
try :
return self._retransmissiontime
except Exception as e:
raise e
@retransmissiontime.setter
def retransmissiontime(self, retransmissiontime) :
ur"""The interval in seconds to retry sending the IKE messages to peer, three consecutive attempts are done with doubled interval after every failure.<br/>Minimum length = 1<br/>Maximum length = 99
"""
try :
self._retransmissiontime = retransmissiontime
except Exception as e:
raise e
@property
def builtin(self) :
ur"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(ipsecprofile_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipsecprofile
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add ipsecprofile.
"""
try :
if type(resource) is not list :
addresource = ipsecprofile()
addresource.name = resource.name
addresource.ikeversion = resource.ikeversion
addresource.encalgo = resource.encalgo
addresource.hashalgo = resource.hashalgo
addresource.lifetime = resource.lifetime
addresource.psk = resource.psk
addresource.publickey = resource.publickey
addresource.privatekey = resource.privatekey
addresource.peerpublickey = resource.peerpublickey
addresource.livenesscheckinterval = resource.livenesscheckinterval
addresource.replaywindowsize = resource.replaywindowsize
addresource.ikeretryinterval = resource.ikeretryinterval
addresource.retransmissiontime = resource.retransmissiontime
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ikeversion = resource[i].ikeversion
addresources[i].encalgo = resource[i].encalgo
addresources[i].hashalgo = resource[i].hashalgo
addresources[i].lifetime = resource[i].lifetime
addresources[i].psk = resource[i].psk
addresources[i].publickey = resource[i].publickey
addresources[i].privatekey = resource[i].privatekey
addresources[i].peerpublickey = resource[i].peerpublickey
addresources[i].livenesscheckinterval = resource[i].livenesscheckinterval
addresources[i].replaywindowsize = resource[i].replaywindowsize
addresources[i].ikeretryinterval = resource[i].ikeretryinterval
addresources[i].retransmissiontime = resource[i].retransmissiontime
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete ipsecprofile.
"""
try :
if type(resource) is not list :
deleteresource = ipsecprofile()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ ipsecprofile() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the ipsecprofile resources that are configured on netscaler.
"""
try :
if not name :
obj = ipsecprofile()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = ipsecprofile()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [ipsecprofile() for _ in range(len(name))]
obj = [ipsecprofile() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = ipsecprofile()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of ipsecprofile resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = ipsecprofile()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the ipsecprofile resources configured on NetScaler.
"""
try :
obj = ipsecprofile()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of ipsecprofile resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = ipsecprofile()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
class Encalgo:
AES = "AES"
_3DES = "3DES"
class Ikeversion:
V1 = "V1"
V2 = "V2"
class Hashalgo:
HMAC_SHA1 = "HMAC_SHA1"
HMAC_SHA256 = "HMAC_SHA256"
HMAC_SHA384 = "HMAC_SHA384"
HMAC_SHA512 = "HMAC_SHA512"
HMAC_MD5 = "HMAC_MD5"
class ipsecprofile_response(base_response) :
def __init__(self, length=1) :
self.ipsecprofile = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipsecprofile = [ipsecprofile() for _ in range(length)]
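# Illustrative usage sketch only (not part of the generated SDK file). It
# follows the pattern of the Citrix nitro sample scripts; the NSIP address and
# credentials below are placeholders.
def _example_ipsecprofile_usage():
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("192.0.2.10", "http")	# hypothetical NetScaler NSIP
	client.set_credential("nsroot", "nsroot")	# placeholder credentials
	profile = ipsecprofile()
	profile.name = "branch_profile"
	profile.ikeversion = ipsecprofile.Ikeversion.V2
	profile.encalgo = [ipsecprofile.Encalgo.AES]
	profile.hashalgo = [ipsecprofile.Hashalgo.HMAC_SHA256]
	ipsecprofile.add(client, profile)
	# fetch the profile back by name
	return ipsecprofile.get(client, profile.name)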
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
def conv3d_forward_naive(input, filter, group, conv_param):
in_n, in_c, in_d, in_h, in_w = input.shape
out_c, f_c, f_d, f_h, f_w = filter.shape
assert f_c * group == in_c
assert np.mod(out_c, group) == 0
sub_out_c = out_c // group
stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
'dilations']
out_d = 1 + (in_d + 2 * pad[0] - (dilation[0] * (f_d - 1) + 1)) // stride[0]
out_h = 1 + (in_h + 2 * pad[1] - (dilation[1] * (f_h - 1) + 1)) // stride[1]
out_w = 1 + (in_w + 2 * pad[2] - (dilation[2] * (f_w - 1) + 1)) // stride[2]
out = np.zeros((in_n, out_c, out_d, out_h, out_w))
d_bolck_d = (dilation[0] * (f_d - 1) + 1)
d_bolck_h = (dilation[1] * (f_h - 1) + 1)
d_bolck_w = (dilation[2] * (f_w - 1) + 1)
input_pad = np.pad(input, ((0, ), (0, ), (pad[0], ), (pad[1], ),
(pad[2], )),
mode='constant',
constant_values=0)
filter_dilation = np.zeros((out_c, f_c, d_bolck_d, d_bolck_h, d_bolck_w))
filter_dilation[:, :, 0:d_bolck_d:dilation[0], 0:d_bolck_h:dilation[1], 0:
d_bolck_w:dilation[2]] = filter
for d in range(out_d):
for i in range(out_h):
for j in range(out_w):
for g in range(group):
input_pad_masked = \
input_pad[:, g * f_c:(g + 1) * f_c,
d * stride[0]:d * stride[0] + d_bolck_d,
i * stride[1]:i * stride[1] + d_bolck_h,
j * stride[2]:j * stride[2] + d_bolck_w]
f_sub = filter_dilation[g * sub_out_c:(g + 1) *
sub_out_c, :, :, :, :]
for k in range(sub_out_c):
out[:, g * sub_out_c + k, d, i, j] = \
np.sum(input_pad_masked * f_sub[k, :, :, :, :],
axis=(1, 2, 3, 4))
return out
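# A minimal sanity-check sketch (not used by the tests below): runs the naive
# reference convolution on assumed small shapes and checks that the output
# spatial size follows 1 + (in + 2*pad - (dilation*(k-1)+1)) // stride.
def _conv3d_naive_shape_demo():
    x = np.random.random((1, 2, 5, 5, 5)).astype(np.float32)  # NCDHW input
    w = np.random.random((4, 2, 3, 3, 3)).astype(np.float32)  # out_c, f_c, kd, kh, kw
    param = {'stride': [1, 1, 1], 'pad': [1, 1, 1], 'dilations': [1, 1, 1]}
    y = conv3d_forward_naive(x, w, 1, param)
    # 1 + (5 + 2*1 - 3) // 1 == 5, so spatial dims are preserved here
    assert y.shape == (1, 4, 5, 5, 5)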
class TestConv3dOp(OpTest):
def setUp(self):
self.op_type = "conv3d"
self.use_cudnn = False
self.use_mkldnn = False
self.data_format = "AnyLayout"
self.dtype = np.float32
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_test_case()
conv3d_param = {
'stride': self.stride,
'pad': self.pad,
'dilations': self.dilations
}
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.random(self.filter_size).astype(self.dtype)
output = conv3d_forward_naive(input, filter, self.groups,
conv3d_param).astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format
}
self.outputs = {'Output': output}
def testcudnn(self):
return core.is_compiled_with_cuda() and self.use_cudnn
def test_check_output(self):
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
self.check_output_with_place(place, atol=1e-5)
def test_check_grad(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
self.check_grad_with_place(
place, {'Input', 'Filter'}, 'Output', max_relative_error=0.03)
def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace()
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.03,
no_grad_set=set(['Input']))
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
class TestCase1(TestConv3dOp):
def init_test_case(self):
self.pad = [1, 1, 1]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 4, 4, 4] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3, 3]
class TestWithGroup1(TestConv3dOp):
def init_group(self):
self.groups = 3
class TestWithGroup2(TestCase1):
def init_group(self):
self.groups = 3
class TestWith1x1(TestConv3dOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
class TestWithInput1x1Filter1x1(TestConv3dOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
        self.input_size = [2, 3, 1, 1, 1]  # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 1, 1, 1]
def init_dilation(self):
self.dilations = [1, 1, 1]
def init_group(self):
self.groups = 3
class TestWithDilation(TestConv3dOp):
def init_test_case(self):
self.pad = [0, 0, 0]
self.stride = [1, 1, 1]
self.input_size = [2, 3, 6, 6, 6] # NCDHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 2, 2, 2]
def init_dilation(self):
self.dilations = [2, 2, 2]
def init_group(self):
self.groups = 3
#----------------Conv3dCUDNN----------------
class TestCUDNN(TestConv3dOp):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16CUDNN(TestConv3dOp):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWithGroup1CUDNN(TestWithGroup1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16WithGroup1CUDNN(TestWithGroup1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWithGroup2CUDNN(TestWithGroup2):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16WithGroup2CUDNN(TestWithGroup2):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWith1x1CUDNN(TestWith1x1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16With1x1CUDNN(TestWith1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestWithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_cudnn = True
class TestFP16WithInput1x1Filter1x1CUDNN(TestWithInput1x1Filter1x1):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
class TestCUDNNExhaustiveSearch(TestCUDNN):
def init_kernel_type(self):
self.use_cudnn = True
self.exhaustive_search = True
# FIXME(typhoonzero): find a way to determine if
# using cudnn > 6 in python
# class TestWithDilationCUDNN(TestWithDilation):
# def init_op_type(self):
# self.op_type = "conv3d"
if __name__ == '__main__':
unittest.main()
|
|
import settings, sys, os, re, socket, requests, logging, traceback, inspect, string, random
from flask import request, render_template, make_response
from werkzeug.utils import escape
from werkzeug.urls import iri_to_uri
def ipv4Test(address):
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
def ipv6Test(address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error: # not a valid address
return False
return True
def isValidIP(address, tcp_proto=''):
"""
Determine if ip address is valid
:address: str
:tcp_proto: str
:ref: <https://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python>
"""
if tcp_proto == '4':
return ipv4Test(address)
elif tcp_proto == '6':
return ipv6Test(address)
else:
if not ipv4Test(address) and not ipv6Test(address):
return False
return True
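# Small illustrative check (addresses are documentation examples from RFC 5737/3849):
def _example_ip_validation():
    assert isValidIP('192.0.2.1', tcp_proto='4')
    assert isValidIP('2001:db8::1', tcp_proto='6')
    assert not isValidIP('not-an-ip')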
def getInternalIP():
""" Returns current ip of system """
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def getExternalIP():
""" Returns external ip of system """
ip = None
# redundancy in case a service provider goes down
externalip_services = [
"https://ipv4.icanhazip.com",
"https://api.ipify.org",
"https://myexternalip.com/raw",
"https://ipecho.net/plain",
"https://bot.whatismyipaddress.com"
]
for url in externalip_services:
try:
ip = requests.get(url).text.strip()
except:
pass
if ip is not None and isValidIP(ip) == True:
break
return ip
def getDNSNames():
""" Returns ( hostname, domain ) of system """
host, domain = None, None
try:
host, domain = socket.getfqdn().split('.', 1)
except:
pass
if host == None or host == "":
try:
host, domain = socket.gethostbyaddr(socket.gethostname())[0].split('.', 1)
except:
pass
if host == None or host == "":
try:
host, domain = socket.getaddrinfo(socket.gethostname(), 0, flags=socket.AI_CANONNAME)[0][3].split('.', 1)
except:
pass
return host, domain
def hostToIP(host):
""" Returns ip of host, or None on failure"""
try:
return socket.gethostbyname(host)
except:
return None
def ipToHost(ip):
""" Returns hostname of ip, or None on failure"""
try:
return socket.gethostbyaddr(ip)[0]
except:
return None
def objToDict(obj):
"""
converts an arbitrary object to dict
"""
return dict((attr, getattr(obj, attr)) for attr in dir(obj) if
not attr.startswith('__') and not inspect.ismethod(getattr(obj, attr)))
def rowToDict(row):
"""
converts sqlalchemy row object to python dict
does not recurse through relationships
tries table data, then _asdict() method, then objToDict()
"""
d = {}
if hasattr(row, '__table__'):
for column in row.__table__.columns:
d[column.name] = str(getattr(row, column.name))
elif hasattr(row, '_asdict'):
d = row._asdict()
else:
d = objToDict(row)
return d
def strFieldsToDict(fields_str):
return dict(field.split(':') for field in fields_str.split(','))
def dictToStrFields(fields_dict):
return ','.join("{}:{}".format(k, v) for k, v in fields_dict.items())
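# Round-trip illustration (values are made up): strFieldsToDict parses a
# "key:value,key:value" string and dictToStrFields serializes it back.
def _example_field_roundtrip():
    fields = strFieldsToDict('ip:192.0.2.1,port:5060')
    assert fields == {'ip': '192.0.2.1', 'port': '5060'}
    assert strFieldsToDict(dictToStrFields(fields)) == fields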
def updateConfig(config_obj, field_dict):
config_file = "<no filepath available>"
try:
config_file = config_obj.__file__
with open(config_file, 'r+') as config:
config_str = config.read()
for key, val in field_dict.items():
regex = r"^(?!#)(?:" + re.escape(key) + \
r")[ \t]*=[ \t]*(?:\w+\(.*\)[ \t\v]*$|[\w\d\.]+[ \t]*$|\{.*\}|\[.*\][ \t]*$|\(.*\)[ \t]*$|\"\"\".*\"\"\"[ \t]*$|'''.*'''[ \v]*$|\".*\"[ \t]*$|'.*')"
replace_str = "{} = {}".format(key, repr(val))
config_str = re.sub(regex, replace_str, config_str, flags=re.MULTILINE)
config.seek(0)
config.write(config_str)
config.truncate()
except:
        print('Problem updating the {0} configuration file'.format(config_file))
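# For illustration (the settings file content here is assumed):
# updateConfig(settings, {'DEBUG': True, 'LISTEN_PORT': 5000}) would rewrite
#   DEBUG = False        ->  DEBUG = True
#   LISTEN_PORT = 8080   ->  LISTEN_PORT = 5000
# while leaving commented-out assignments untouched, because the regex above
# anchors at the line start and excludes lines beginning with '#'.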
def stripDictVals(d):
for key, val in d.items():
if isinstance(val, str):
d[key] = val.strip()
elif isinstance(val, int):
d[key] = int(str(val).strip())
return d
def getCustomRoutes():
""" Return custom kamailio routes from config file """
custom_routes = []
with open(settings.KAM_CFG_PATH, 'r') as kamcfg_file:
kamcfg_str = kamcfg_file.read()
regex = r"CUSTOM_ROUTING_START.*CUSTOM_ROUTING_END"
custom_routes_str = re.search(regex, kamcfg_str, flags=re.MULTILINE | re.DOTALL).group(0)
regex = r"^route\[(\w+)\]"
matches = re.finditer(regex, custom_routes_str, flags=re.MULTILINE)
for matchnum, match in enumerate(matches):
if len(match.groups()) > 0:
custom_routes.append(match.group(1))
for route in custom_routes:
print(route)
return custom_routes
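# The block this function parses looks roughly like the following kamailio.cfg
# fragment (route names are hypothetical):
#   ####### CUSTOM_ROUTING_START #######
#   route[CUSTOM_ROUTE_EXAMPLE1] { ... }
#   route[CUSTOM_ROUTE_EXAMPLE2] { ... }
#   ####### CUSTOM_ROUTING_END #######
# which would yield ['CUSTOM_ROUTE_EXAMPLE1', 'CUSTOM_ROUTE_EXAMPLE2'].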
def generateID(size=10, chars=string.ascii_lowercase + string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
# modified method from Python cookbook, #475186
def supportsColor(stream):
""" Return True if terminal supports ASCII color codes """
if not hasattr(stream, "isatty") or not stream.isatty():
# auto color only on TTYs
return False
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
class IO():
""" Contains static methods for handling i/o operations """
if supportsColor(sys.stdout):
@staticmethod
def printerr(message):
print('\x1b[1;31m' + str(message).strip() + '\x1b[0m')
@staticmethod
def printinfo(message):
print('\x1b[1;32m' + str(message).strip() + '\x1b[0m')
@staticmethod
def printwarn(message):
print('\x1b[1;33m' + str(message).strip() + '\x1b[0m')
@staticmethod
def printdbg(message):
print('\x1b[1;34m' + str(message).strip() + '\x1b[0m')
@staticmethod
def printbold(message):
print('\x1b[1;37m' + str(message).strip() + '\x1b[0m')
@staticmethod
def logcrit(message):
logging.getLogger().log(logging.CRITICAL, '\x1b[1;31m' + str(message).strip() + '\x1b[0m')
@staticmethod
def logerr(message):
logging.getLogger().log(logging.ERROR, '\x1b[1;31m' + str(message).strip() + '\x1b[0m')
@staticmethod
def loginfo(message):
logging.getLogger().log(logging.INFO, '\x1b[1;32m' + str(message).strip() + '\x1b[0m')
@staticmethod
def logwarn(message):
logging.getLogger().log(logging.WARNING, '\x1b[1;33m' + str(message).strip() + '\x1b[0m')
@staticmethod
def logdbg(message):
logging.getLogger().log(logging.DEBUG, '\x1b[1;34m' + str(message).strip() + '\x1b[0m')
@staticmethod
def lognolvl(message):
logging.getLogger().log(logging.NOTSET, '\x1b[1;37m' + str(message).strip() + '\x1b[0m')
else:
@staticmethod
def printerr(message):
print(str(message).strip())
@staticmethod
def printinfo(message):
print(str(message).strip())
@staticmethod
def printwarn(message):
print(str(message).strip())
@staticmethod
def printdbg(message):
print(str(message).strip())
@staticmethod
def printbold(message):
print(str(message).strip())
@staticmethod
def logcrit(message):
logging.getLogger().log(logging.CRITICAL, str(message).strip())
@staticmethod
def logerr(message):
logging.getLogger().log(logging.ERROR, str(message).strip())
@staticmethod
def loginfo(message):
logging.getLogger().log(logging.INFO, str(message).strip())
@staticmethod
def logwarn(message):
logging.getLogger().log(logging.WARNING, str(message).strip())
@staticmethod
def logdbg(message):
logging.getLogger().log(logging.DEBUG, str(message).strip())
@staticmethod
def lognolvl(message):
logging.getLogger().log(logging.NOTSET, str(message).strip())
def debugException(ex=None, log_ex=True, print_ex=False, showstack=True):
"""
Debugging of an exception: print and/or log frame and/or stacktrace
:param ex: The exception object
:param log_ex: True | False
:param print_ex: True | False
:param showstack: True | False
"""
# get basic info and the stack
exc_type, exc_value, exc_tb = sys.exc_info()
text = "((( EXCEPTION )))\n[CLASS]: {}\n[VALUE]: {}\n".format(exc_type, exc_value)
# get detailed exception info
if ex is None:
ex = exc_value
for k,v in vars(ex).items():
text += "[{}]: {}\n".format(k.upper(), str(v))
# determine how far we trace it back
tb_list = None
if showstack:
tb_list = traceback.extract_tb(exc_tb)
else:
tb_list = traceback.extract_tb(exc_tb, limit=1)
# ensure a backtrace exists first
if tb_list is not None and len(tb_list) > 0:
text += "((( BACKTRACE )))\n"
for tb_info in tb_list:
filename, linenum, funcname, source = tb_info
if funcname != '<module>':
funcname = funcname + '()'
text += "[FILE]: {}\n[LINE NUM]: {}\n[FUNCTION]: {}\n[SOURCE]: {}".format(filename, linenum, funcname,
source)
if log_ex:
IO.logerr(text)
if print_ex:
IO.printerr(text)
def debugEndpoint(log_out=False, print_out=True, **kwargs):
"""
Debug an endpoint, must be run within request context
:param log_out: True | False
:param print_out: True | False
:param kwargs: Any args to print / log (<key=value> key word pairs)
"""
calling_chain = []
frame = sys._getframe().f_back if sys._getframe().f_back is not None else sys._getframe()
# parent module
if hasattr(frame.f_code, 'co_filename'):
calling_chain.append(os.path.abspath(frame.f_code.co_filename))
# parent class
if 'self' in frame.f_locals:
        calling_chain.append(frame.f_locals["self"].__class__.__name__)
else:
for k,v in frame.f_globals.items():
if not k.startswith('__') and frame.f_code.co_name in dir(v):
calling_chain.append(k)
break
# parent func
if frame.f_code.co_name != '<module>':
calling_chain.append(frame.f_code.co_name)
text = "((( [DEBUG ENDPOINT]: {} )))\n".format(' -> '.join(calling_chain))
items_dict = objToDict(request)
for k, v in sorted(items_dict.items()):
text += '{}: {}\n'.format(k, str(v).strip())
if len(kwargs) > 0:
        for k, v in sorted(kwargs.items()):
text += "{}: {}\n".format(k, v)
if log_out:
IO.logdbg(text)
if print_out:
IO.printdbg(text)
def allowed_file(filename,ALLOWED_EXTENSIONS=set(['csv','txt','pdf','png','jpg','jpeg','gif'])):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def showError(type="", code=500, msg=None):
return render_template('error.html', type=type), code
def redirectCustom(location, *render_args, code=302, response_cb=None, force_redirect=False):
"""
=======
Summary
=======
Combines functionality of :func:`werkzeug.utils.redirect` with :func:`flask.templating.render_template`
to allow for virtual redirection to an endpoint with any HTTP status code.\n
=========
Use Cases
=========
1. render template at a custom url (doesn't have to be an endpoint)
def index():
return redirectCustom('http://localhost:5000/notfound', render_template('not_found.html'), code=404)
def index():
html = '<h1>Are you lost {{ username }}?</h1>'
return redirectCustom(url_for('notfound'), render_template_string(html), username='john', code=404)
2. customizing the response before sending to the client
def index():
        def addAuthHeaders(response):
            response.headers['Access-Control-Allow-Origin'] = '*'
            response.headers['Access-Control-Allow-Headers'] = 'origin, x-requested-with, content-type'
            response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
            return response
        return redirectCustom(url_for('login'), render_template('login_custom.html'), code=403, response_cb=addAuthHeaders)
3. redirecting with a custom HTTP status code (normally restricted to 300's)
def index():
return redirectCustom(url_for('http://localhost:5000/error'), showError())
4. forcing redirection when client normally would ignore location header
def index():
return redirectCustom(url_for(showError), showError(), code=500, force_redirect=True)
    :note: the endpoint logic is only executed when the view function is passed in render_args
:param location: the location the response should virtually redirect to
:param render_args: the return value from a view / template rendering function
:param code: the return HTTP status code, defaults to 302 like a normal redirect
:param response_cb: callback function taking :class:`werkzeug.wrappers.Response` object as arg and returning edited response
:param force_redirect: whether to force redirection on client, only needed when client ignores location header
:return: client viewable :class:`werkzeug.wrappers.Response` object
"""
display_location = escape(location)
if isinstance(location, str):
# Safe conversion as stated in :func:`werkzeug.utils.redirect`
location = iri_to_uri(location, safe_conversion=True)
# create a response object, from rendered data (accepts None)
response = make_response(*render_args)
# if no render_args given fill response with default redirect html
if len(render_args) <= 0:
        html_head = '<!DOCTYPE HTML>\n' \
                    '<html><head><meta charset="utf-8"><meta http-equiv="refresh" content="0; URL={location}">' \
                    '<title>Redirecting</title></head>\n'
        if force_redirect:
            html_body = '<body><script type="text/javascript">window.location.href="{location}";</script></body></html>'
        else:
            html_body = '<body><h1>Redirecting...</h1>\n' \
                        '<p>You should be redirected automatically to target URL: ' \
                        '<a href="{location}">{display_location}</a>. If not click the link.</p></body></html>'
        response.response = (html_head + html_body).format(location=escape(location),
                                                           display_location=display_location)
response.mimetype = 'text/html'
# customize response if needed
if response_cb is not None:
response = response_cb(response)
# override return code if set from render args
if len(render_args) == 3:
response.status = code
# change response location
response.headers['Location'] = location
return response
class status():
"""
Namespace for descriptive status codes, for code readability
-- HTTP Status Codes --
Original Idea From: `flask_api <https://www.flaskapi.org/>`_
:ref RFC2616: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
:ref RFC6585: http://tools.ietf.org/html/rfc6585
... examples of possible future sections ...
-- MYSQL Error Codes --
-- SERVER Status Codes --
-- KAMAILIO Error Codes --
-- RTPENGINE Error Codes --
"""
HTTP_CONTINUE = 100
HTTP_SWITCHING_PROTOCOLS = 101
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_NO_CONTENT = 204
HTTP_RESET_CONTENT = 205
HTTP_PARTIAL_CONTENT = 206
HTTP_MULTI_STATUS = 207
HTTP_MULTIPLE_CHOICES = 300
HTTP_MOVED_PERMANENTLY = 301
HTTP_FOUND = 302
HTTP_SEE_OTHER = 303
HTTP_NOT_MODIFIED = 304
HTTP_USE_PROXY = 305
HTTP_RESERVED = 306
HTTP_TEMPORARY_REDIRECT = 307
HTTP_PERMANENT_REDIRECT = 308
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_PAYMENT_REQUIRED = 402
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_NOT_ACCEPTABLE = 406
HTTP_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_REQUEST_TIMEOUT = 408
HTTP_CONFLICT = 409
HTTP_GONE = 410
HTTP_LENGTH_REQUIRED = 411
HTTP_PRECONDITION_FAILED = 412
HTTP_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_REQUEST_URI_TOO_LONG = 414
HTTP_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_EXPECTATION_FAILED = 417
HTTP_PRECONDITION_REQUIRED = 428
HTTP_TOO_MANY_REQUESTS = 429
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
HTTP_CONNECTION_CLOSED_WITHOUT_RESPONSE = 444
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
HTTP_BAD_GATEWAY = 502
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_GATEWAY_TIMEOUT = 504
HTTP_HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_LOOP_DETECTED = 508
HTTP_NOT_EXTENDED = 510
HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511
|
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import os
import signal
import time
from concurrent import futures
import threading
import grpc
import tensorflow.compat.v1 as tf
from fedlearner.common import fl_logging
from fedlearner.common import trainer_master_service_pb2 as tm_pb
from fedlearner.common import trainer_master_service_pb2_grpc as tm_grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.trainer.estimator import FLEstimator
from fedlearner.trainer.sparse_estimator import SparseFLEstimator
from fedlearner.trainer.cluster_server import ClusterServer
from fedlearner.trainer._global_context import global_context as _gctx
class ExportModelHook():
def after_save(self, sess, model, export_dir, inputs, outputs):
pass
class _TriggerHook(tf.train.SessionRunHook):
def __init__(self,
trigger_secs=None,
trigger_steps=None,
trigger_fn=None):
self._trigger_secs = trigger_secs
self._trigger_steps = trigger_steps
self._trigger_fn = trigger_fn
def begin(self):
self._global_step_tensor = tf.train.get_or_create_global_step()
self._last_triggered_time = None
self._last_triggered_step = None
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
self._trigger(global_step)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._should_trigger(global_step):
self._trigger(global_step)
def end(self, session):
global_step = session.run(self._global_step_tensor)
self._trigger(global_step)
def _should_trigger(self, global_step):
if self._last_triggered_time is None \
or self._last_triggered_step is None:
return True
if self._trigger_secs is not None:
if time.time() >= self._last_triggered_time + self._trigger_secs:
return True
if self._trigger_steps is not None:
if global_step >= self._last_triggered_step + self._trigger_steps:
return True
return False
def _trigger(self, global_step):
if self._trigger_fn:
self._trigger_fn(global_step)
self._last_triggered_time = time.time()
self._last_triggered_step = global_step
#class _CheckpointSaverHook(tf.train.CheckpointSaverHook):
# def _save(self, session, step):
# if self._timer.last_triggered_step() is None:
# # skip save checkpoint
# fl_logging.info("skip save checkpoint")
# return False
# return super(_CheckpointSaverHook, self)._save(session, step)
class _DataVisitorCheckpointHook(tf.train.SessionRunHook):
def __init__(self, visitor):
self._visitor = visitor
def begin(self):
self._ckpt_plhd = tf.placeholder(tf.string, name="data_checkpoint_plhd")
self._ckpt_var = tf.Variable("", name="data_checkpoint")
self._save_op = self._ckpt_var.assign(self._ckpt_plhd)
def after_create_session(self, session, coord):
data = session.run(self._ckpt_var)
self._visitor.restore(data)
def before_checkpoint_save(self, session, global_step_value):
data = self._visitor.dump()
fl_logging.info("DataVisitor save checkpoint for global step %d, "
"size: %d", global_step_value, len(data))
session.run(
self._save_op,
{self._ckpt_plhd: data},
)
def create_checkpoint_saver_listener(self):
return _DataVisitorCheckpointHook.CheckpointSaverListener(self)
class CheckpointSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, hook):
self._hook = hook
def before_save(self, session, global_step_value):
self._hook.before_checkpoint_save(session, global_step_value)
class DataBlockCheckpointSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, visitor):
self._visitor = visitor
def begin(self):
self._ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
var_tmp = tf.Variable("", name="data_checkpoint")
self._save_op = var_tmp.assign(self._ckpt)
def before_save(self, session, global_step_value):
session.run(
self._save_op,
{self._ckpt: self._visitor.dump()}
)
#fl_logging.info("data checkpoint saved result: %s", res)
class _FakeBridge():
def send_op(self, name, x):
def func(x):
raise RuntimeError("Unexcepted call send op")
out = tf.py_function(func=func, inp=[x], Tout=[], name='send_' + name)
return out
def receive_op(self, name, dtype):
def func():
raise RuntimeError("Unexcepted call receive op")
return tf.py_function(func=func, inp=[], Tout=[dtype])[0]
def register_data_block_handler(self, handler):
pass
class _FakeTrainerMasterClient():
pass
class _TrainerMaster(tm_grpc.TrainerMasterServiceServicer):
def __init__(self,
cluster_server,
role,
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None):
self._cluster_server = cluster_server
self._role = role
self._mode = mode
self._model_fn = model_fn
self._input_fn = input_fn
self._serving_input_receiver_fn = serving_input_receiver_fn
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._checkpoint_path = checkpoint_path
self._save_checkpoint_steps = save_checkpoint_steps
self._save_checkpoint_secs = save_checkpoint_secs
self._summary_path = summary_path
self._summary_save_steps = summary_save_steps
self._summary_save_secs = summary_save_secs
self._export_path = export_path
self._sparse_estimator = sparse_estimator
self._export_model_hook = export_model_hook
self._lock = threading.RLock()
self._status = tm_pb.MasterStatus.CREATED
self._checkpoint_listeners = []
self._session_hooks = []
self._running_workers = set() # set(worker_rank)
self._completed_workers = set() # set(worker_rank)
# for compatibility
self._worker0_terminated_at = 0
self._worker0_cluster_def = None
def _check_status(self, callback_fn):
with self._lock:
return callback_fn(self._status)
def _run_grpc_server(self, address):
self._grpc_server = grpc.server(
futures.ThreadPoolExecutor(
max_workers=8,
thread_name_prefix="TrainerMasterServerThreadPoolExecutor"
))
tm_grpc.add_TrainerMasterServiceServicer_to_server(
self, self._grpc_server)
self._grpc_server.add_insecure_port(address)
self._grpc_server.start()
fl_logging.info('Trainer Master Server start on address: %s', address)
def _transfer_status(self, frm, to):
if self._status != frm:
raise RuntimeError(
"Trainer Master status transfer failed, "
"want from %s to %s, but current status: %s"% \
(tm_pb.MasterStatus.Name(frm),
tm_pb.MasterStatus.Name(to),
tm_pb.MasterStatus.Name(self._status))
)
self._status = to
fl_logging.info("Trainer Master status transfer, from %s to %s",
tm_pb.MasterStatus.Name(frm),
tm_pb.MasterStatus.Name(to))
def run_forever(self, listen_port=None):
with self._lock:
self._transfer_status(tm_pb.MasterStatus.CREATED,
tm_pb.MasterStatus.INITIALING)
if listen_port:
self._run_grpc_server(listen_port)
while self._cluster_server is None:
            # waiting to receive cluster_def from worker_0
with self._lock:
if self._worker0_cluster_def:
fl_logging.info("received worker_0 cluster_def: %s",
self._worker0_cluster_def)
self._cluster_server = ClusterServer(
tf.train.ClusterSpec(self._worker0_cluster_def),
"master")
break
fl_logging.info("still waiting receive cluster_def from worker_0")
time.sleep(2)
self._run()
sig = signal.sigwait([signal.SIGHUP, signal.SIGINT, signal.SIGTERM])
fl_logging.info("Server shutdown by signal: %s",
signal.Signals(sig).name)
def _add_checkpoint_listener(self, listener):
with self._lock:
self._checkpoint_listeners.append(listener)
def _add_session_hook(self, hook):
with self._lock:
self._session_hooks.append(hook)
def _create_estimator(self):
estimator_factory = SparseFLEstimator \
if self._sparse_estimator else FLEstimator
return estimator_factory(
cluster_server=self._cluster_server,
bridge=_FakeBridge(),
trainer_master=_FakeTrainerMasterClient(),
role=self._role,
model_fn=self._model_fn)
def _run(self):
fl_logging.info("create estimator")
estimator = self._create_estimator()
fl_logging.info("start session_run")
self._session_run(estimator)
fl_logging.info("session_run done")
fl_logging.info("start export_model")
self._export_model(estimator)
fl_logging.info("export_model done")
self._transfer_status(tm_pb.MasterStatus.WORKER_COMPLETED,
tm_pb.MasterStatus.COMPLETED)
def _session_run(self, estimator):
mode_key = tf.estimator.ModeKeys.TRAIN if self._mode == "train" \
else tf.estimator.ModeKeys.EVAL
with tf.Graph().as_default() as g, \
g.device(self._cluster_server.device_setter):
features, labels = estimator. \
_get_features_and_labels_from_input_fn(
self._input_fn, mode_key)
# only for create graph
spec, _ = estimator._get_model_spec(
features, labels, mode_key)
session_creator = tf.train.ChiefSessionCreator(
master=self._cluster_server.target,
config=self._cluster_server.cluster_config,
checkpoint_filename_with_path= \
self._checkpoint_filename_with_path
)
hooks = self._session_hooks
# saver hook
if mode_key == tf.estimator.ModeKeys.TRAIN \
and self._checkpoint_path \
and (self._save_checkpoint_secs \
or self._save_checkpoint_steps):
hooks.append(
tf.train.CheckpointSaverHook(
checkpoint_dir=self._checkpoint_path,
save_secs=self._save_checkpoint_secs,
save_steps=self._save_checkpoint_steps,
listeners=self._checkpoint_listeners,
)
)
# summary hook
if mode_key == tf.estimator.ModeKeys.TRAIN \
and (self._summary_save_secs or self._summary_save_steps):
if not self._summary_path:
self._summary_path = self._checkpoint_path
if self._summary_path:
hooks.append(
tf.train.SummarySaverHook(
output_dir=self._summary_path,
save_secs=self._summary_save_secs,
save_steps=self._summary_save_steps,
scaffold=session_creator._scaffold,
)
)
noop = tf.no_op()
with tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks) as sess:
with self._lock:
# ready, set status to running
self._transfer_status(tm_pb.MasterStatus.INITIALING,
tm_pb.MasterStatus.RUNNING)
while True:
sess.run(noop)
with self._lock:
if self._status == tm_pb.MasterStatus.WORKER_COMPLETED:
break
time.sleep(0.2)
def _export_model(self, estimator):
if self._export_path:
export_path = os.path.join(
self._export_path, str(self._worker0_terminated_at))
with tf.Graph().as_default() as g, \
g.device(self._cluster_server.device_setter):
receiver = self._serving_input_receiver_fn()
spec, model = estimator._get_model_spec(
receiver.features, None, tf.estimator.ModeKeys.PREDICT)
assert not model.sends, "Exported model cannot send"
assert not model.recvs, "Exported model cannot receive"
with tf.Session(
target=self._cluster_server.target,
config=self._cluster_server.cluster_config) as sess:
tf.saved_model.simple_save(sess, export_path,
receiver.receiver_tensors,
spec.predictions, None)
if self._export_model_hook:
self._export_model_hook.after_save(
sess, model, export_path,
receiver.receiver_tensors, spec.predictions)
def _request_data_block(self, request):
"""override by subclass"""
raise RuntimeError("Unimplement")
def RequestDataBlock(self, request, context):
if request.worker_rank not in self._running_workers:
return tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="unregistered worker")
)
if request.worker_rank in self._completed_workers:
return tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="worker has completed")
)
return self._request_data_block(request)
def WorkerRegister(self, request, context):
with self._lock:
# for compatibility, more information see:
# protocal/fedlearner/common/trainer_master_service.proto
if self._worker0_cluster_def is None and request.worker_rank == 0:
self._worker0_cluster_def = request.cluster_def
if self._status in (tm_pb.MasterStatus.WORKER_COMPLETED,
tm_pb.MasterStatus.COMPLETED):
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_DATA_FINISHED
))
if self._status != tm_pb.MasterStatus.RUNNING:
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode. \
STATUS_WAIT_FOR_SYNCING_CHECKPOINT
))
if request.worker_rank in self._running_workers:
fl_logging.warning("worker_%d:%s repeat registration",
request.worker_rank, request.hostname)
else:
fl_logging.info("worker_%d:%s registration",
request.worker_rank, request.hostname)
self._running_workers.add(request.worker_rank)
if request.worker_rank in self._completed_workers:
self._completed_workers.remove(request.worker_rank)
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS)
)
def WorkerComplete(self, request, context):
with self._lock:
if request.worker_rank not in self._running_workers:
return tm_pb.WorkerRegisterResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_REQUEST,
error_message="unregistered worker")
)
fl_logging.info("worker_%d completed", request.worker_rank)
self._completed_workers.add(request.worker_rank)
if request.worker_rank == 0:
self._worker0_terminated_at = request.timestamp
if len(self._running_workers) == len(self._completed_workers) \
and 0 in self._running_workers:
# worker 0 completed and all datablock has finished
self._transfer_status(tm_pb.MasterStatus.RUNNING,
tm_pb.MasterStatus.WORKER_COMPLETED)
return tm_pb.WorkerCompleteResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS)
)
def IsCompleted(self, request, context):
with self._lock:
return tm_pb.IsCompletedResponse(
completed=(self._status == tm_pb.MasterStatus.COMPLETED)
)
class LeaderTrainerMaster(_TrainerMaster):
def __init__(self,
cluster_server,
data_visitor,
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None):
super(LeaderTrainerMaster, self).__init__(
cluster_server,
"leader",
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path,
save_checkpoint_steps,
save_checkpoint_secs,
summary_path,
summary_save_steps,
summary_save_secs,
export_path,
sparse_estimator,
export_model_hook)
self._data_visitor = data_visitor
self._last_global_step = -1
# datavisitor checkpoint hook
hook = _DataVisitorCheckpointHook(self._data_visitor)
self._add_checkpoint_listener(
hook.create_checkpoint_saver_listener())
self._add_session_hook(hook)
# trigger hook
self._last_trigger_time = 0
self._add_session_hook(
_TriggerHook(trigger_secs=10,
trigger_fn=self._trigger_fn)
)
def _trigger_fn(self, global_step):
now = time.time()
if self._last_global_step >= 0:
speed = (global_step-self._last_global_step) \
/ (now-self._last_trigger_time)
allocated_epoch, allocated_datablock = self._data_visitor.summary()
total_epoch, total_datablock = \
self._data_visitor.epoch_num, \
self._data_visitor.datablock_size
fl_logging.info("global_step: %d, speed: %0.2f step/sec, "
"epoch: %d/%d, datablock allocated: %d/%d, "
"worker: %d/%d(running/completed)",
global_step, speed,
allocated_epoch, total_epoch,
allocated_datablock, total_datablock,
len(self._running_workers),
len(self._completed_workers))
with _gctx.stats_client.pipeline() as pipe:
pipe.gauge("trainer.global_step", global_step)
pipe.gauge("trainer.datablock_total", total_datablock)
pipe.gauge("trainer.datablock_allocated", allocated_datablock)
pipe.gauge("trainer.speed", speed)
self._last_trigger_time = now
self._last_global_step = global_step
def _request_data_block(self, request):
try:
data_block = next(self._data_visitor)
except StopIteration:
data_block = None
response = tm_pb.DataBlockResponse()
if data_block:
fl_logging.info("allocated worker_%d with block: %s",
request.worker_rank,
data_block.id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS),
block_id=data_block.id,
data_path=data_block.data_path,
)
else:
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_DATA_FINISHED,
error_message="data block finished")
)
return response
class FollowerTrainerMaster(_TrainerMaster):
def __init__(self,
cluster_server,
data_visitor,
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path=None,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None,
summary_path=None,
summary_save_steps=None,
summary_save_secs=None,
export_path=None,
sparse_estimator=False,
export_model_hook=None):
super(FollowerTrainerMaster, self).__init__(
cluster_server,
"follower",
mode,
model_fn,
input_fn,
serving_input_receiver_fn,
checkpoint_filename_with_path,
checkpoint_path,
save_checkpoint_steps,
save_checkpoint_secs,
summary_path,
summary_save_steps,
summary_save_secs,
export_path,
sparse_estimator,
export_model_hook)
self._data_visitor = data_visitor
self._last_global_step = -1
# trigger hook
self._last_trigger_time = 0
self._add_session_hook(
_TriggerHook(trigger_secs=10,
trigger_fn=self._trigger_fn)
)
def _trigger_fn(self, global_step):
now = time.time()
if self._last_global_step >= 0:
speed = (global_step-self._last_global_step) \
/ (now-self._last_trigger_time)
total_datablock = self._data_visitor.datablock_size
fl_logging.info("global_step: %d, speed: %0.2f step/sec, "
"datablock size: %d, "
"worker: %d/%d(running/completed)",
global_step, speed,
total_datablock,
len(self._running_workers),
len(self._completed_workers))
with _gctx.stats_client.pipeline() as pipe:
pipe.gauge("trainer.global_step", global_step)
pipe.gauge("trainer.datablock_total", total_datablock)
pipe.gauge("trainer.speed", speed)
self._last_trigger_time = now
self._last_global_step = global_step
def _request_data_block(self, request):
data_block = self._data_visitor.get_datablock_by_id(request.block_id)
if data_block:
fl_logging.info("allocated worker_%d with block: %s",
request.worker_rank,
data_block.id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_SUCCESS),
block_id=data_block.id,
data_path=data_block.data_path,
)
else:
fl_logging.error("invalid data block id: %s", request.block_id)
response = tm_pb.DataBlockResponse(
status=common_pb.Status(
code=common_pb.StatusCode.STATUS_INVALID_DATA_BLOCK,
error_message="invalid data block")
)
return response
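# Rough wiring sketch (not part of this module; argument values are
# placeholders). A leader-side master is constructed with a ClusterServer,
# a data visitor and the usual estimator callbacks, then serves workers over
# gRPC until worker_0 completes:
#
#   master = LeaderTrainerMaster(
#       cluster_server=cluster_server,
#       data_visitor=data_visitor,
#       mode="train",
#       model_fn=model_fn,
#       input_fn=input_fn,
#       serving_input_receiver_fn=serving_input_receiver_fn,
#       checkpoint_path="/tmp/fl_ckpt",          # placeholder path
#       save_checkpoint_steps=1000,
#       export_path="/tmp/fl_export")            # placeholder path
#   master.run_forever(listen_port=50051)        # illustrative port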
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create Chrome Installer archive.
This script is used to create an archive of all the files required for a
Chrome install in appropriate directory structure. It reads chrome.release
file as input, creates chrome.7z archive, compresses setup.exe and
generates packed_files.txt for mini_installer project.
"""
import ConfigParser
import glob
import optparse
import os
import re
import shutil
import subprocess
import sys
ARCHIVE_DIR = "installer_archive"
# suffix to uncompressed full archive file, appended to options.output_name
ARCHIVE_SUFFIX = ".7z"
BSDIFF_EXEC = "bsdiff.exe"
CHROME_DIR = "Chrome-bin"
CHROME_PATCH_FILE_SUFFIX = "_patch" # prefixed by options.output_name
# compressed full archive suffix, will be prefixed by options.output_name
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
COMPRESSED_FILE_EXT = ".packed.7z" # extension of patch archive file
COURGETTE_EXEC = "courgette.exe"
MINI_INSTALLER_INPUT_FILE = "packed_files.txt"
PATCH_FILE_EXT = '.diff'
SETUP_EXEC = "setup.exe"
SETUP_PATCH_FILE_PREFIX = "setup_patch"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
VERSION_FILE = "VERSION"
g_archive_inputs = []
def BuildVersion(build_dir):
"""Returns the full build version string constructed from information in
VERSION_FILE. Any segment not found in that file will default to '0'.
"""
major = 0
minor = 0
build = 0
patch = 0
for line in open(os.path.join(build_dir, '../../chrome', VERSION_FILE), 'r'):
line = line.rstrip()
if line.startswith('MAJOR='):
major = line[6:]
elif line.startswith('MINOR='):
minor = line[6:]
elif line.startswith('BUILD='):
build = line[6:]
elif line.startswith('PATCH='):
patch = line[6:]
return '%s.%s.%s.%s' % (major, minor, build, patch)
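# For example, a VERSION file containing (values illustrative):
#   MAJOR=34
#   MINOR=0
#   BUILD=1847
#   PATCH=116
# makes BuildVersion() return '34.0.1847.116'; any missing segment stays '0'.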
def CompressUsingLZMA(build_dir, compressed_file, input_file, verbose):
lzma_exec = GetLZMAExec(build_dir)
cmd = [lzma_exec,
'a', '-t7z',
# Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
# pre-filter). This results in a ~2.3MB decrease in installer size on
# a 24MB installer.
# Additionally, these settings reflect a 7zip 4.42 and up change in
          # the definition of -mx9, increasing the dictionary size moving to
# 26bit = 64MB. This results in an additional ~3.5MB decrease.
# Older 7zip versions can support these settings, as these changes
# rely on existing functionality in the lzma format.
'-m0=BCJ2',
'-m1=LZMA:d27:fb128',
'-m2=LZMA:d22:fb128:mf=bt2',
'-m3=LZMA:d22:fb128:mf=bt2',
'-mb0:1',
'-mb0s1:2',
'-mb0s2:3',
compressed_file,
input_file,]
if os.path.exists(compressed_file):
os.remove(compressed_file)
RunSystemCommand(cmd, verbose)
def CopyAllFilesToStagingDir(config, distribution, staging_dir, build_dir,
enable_hidpi):
"""Copies the files required for installer archive.
Copies all common files required for various distributions of Chromium and
also files for the specific Chromium build specified by distribution.
"""
CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir)
if distribution:
if len(distribution) > 1 and distribution[0] == '_':
distribution = distribution[1:]
CopySectionFilesToStagingDir(config, distribution.upper(),
staging_dir, build_dir)
if enable_hidpi == '1':
CopySectionFilesToStagingDir(config, 'HIDPI', staging_dir, build_dir)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir):
"""Copies installer archive files specified in section from src_dir to
staging_dir. This method reads section from config and copies all the
files specified from src_dir to staging dir.
"""
for option in config.options(section):
if option.endswith('dir'):
continue
dst_dir = os.path.join(staging_dir, config.get(section, option))
src_paths = glob.glob(os.path.join(src_dir, option))
if src_paths and not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for src_path in src_paths:
dst_path = os.path.join(dst_dir, os.path.basename(src_path))
if not os.path.exists(dst_path):
g_archive_inputs.append(src_path)
shutil.copy(src_path, dst_dir)
def GenerateDiffPatch(options, orig_file, new_file, patch_file):
if (options.diff_algorithm == "COURGETTE"):
exe_file = os.path.join(options.last_chrome_installer, COURGETTE_EXEC)
cmd = '%s -gen "%s" "%s" "%s"' % (exe_file, orig_file, new_file, patch_file)
else:
exe_file = os.path.join(options.build_dir, BSDIFF_EXEC)
cmd = [exe_file, orig_file, new_file, patch_file,]
RunSystemCommand(cmd, options.verbose)
def GetLZMAExec(build_dir):
lzma_exec = os.path.join(build_dir, "..", "..", "third_party",
"lzma_sdk", "Executable", "7za.exe")
return lzma_exec
def GetPrevVersion(build_dir, temp_dir, last_chrome_installer, output_name):
if not last_chrome_installer:
return ''
lzma_exec = GetLZMAExec(build_dir)
prev_archive_file = os.path.join(last_chrome_installer,
output_name + ARCHIVE_SUFFIX)
cmd = [lzma_exec,
'x',
'-o"%s"' % temp_dir,
prev_archive_file,
'Chrome-bin/*/chrome.dll',]
RunSystemCommand(cmd, options.verbose)
dll_path = glob.glob(os.path.join(temp_dir, 'Chrome-bin', '*', 'chrome.dll'))
return os.path.split(os.path.split(dll_path[0])[0])[1]
def MakeStagingDirectories(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
  # Keep the archive staging dir separate from the temp dir created below,
  # which is used for extracting the previous archive.
  file_path = os.path.join(staging_dir, ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
temp_file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(temp_file_path):
shutil.rmtree(temp_file_path)
os.makedirs(temp_file_path)
return (file_path, temp_file_path)
def Readconfig(input_file, current_version):
"""Reads config information from input file after setting default value of
  global variables.
"""
variables = {}
variables['ChromeDir'] = CHROME_DIR
variables['VersionDir'] = os.path.join(variables['ChromeDir'],
current_version)
config = ConfigParser.SafeConfigParser(variables)
config.read(input_file)
return config
def RunSystemCommand(cmd, verbose):
"""Runs |cmd|, prints the |cmd| and its output if |verbose|; otherwise
captures its output and only emits it on failure.
"""
if verbose:
print 'Running', cmd
try:
# Run |cmd|, redirecting stderr to stdout in order for captured errors to be
# inline with corresponding stdout.
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if verbose:
print output
except subprocess.CalledProcessError as e:
raise Exception("Error while running cmd: %s\n"
"Exit code: %s\n"
"Command output:\n%s" %
(e.cmd, e.returncode, e.output))
def CreateArchiveFile(options, staging_dir, current_version, prev_version):
"""Creates a new installer archive file after deleting any existing old file.
"""
# First create an uncompressed archive file for the current build (chrome.7z)
lzma_exec = GetLZMAExec(options.build_dir)
archive_file = os.path.join(options.output_dir,
options.output_name + ARCHIVE_SUFFIX)
if options.depfile:
# If a depfile was requested, do the glob of the staging dir and generate
# a list of dependencies in .d format. We list the files that were copied
# into the staging dir, not the files that are actually in the staging dir
# because the ones in the staging dir will never be edited, and we want
# to have the build be triggered when the thing-that-was-copied-there
# changes.
def path_fixup(path):
"""Fixes path for depfile format: backslash to forward slash, and
backslash escaping for spaces."""
return path.replace('\\', '/').replace(' ', '\\ ')
# Gather the list of files in the staging dir that will be zipped up. We
# only gather this list to make sure that g_archive_inputs is complete (i.e.
    # that no file copies were missed).
staging_contents = []
for root, dirs, files in os.walk(os.path.join(staging_dir, CHROME_DIR)):
for filename in files:
staging_contents.append(path_fixup(os.path.join(root, filename)))
# Make sure there's an archive_input for each staging dir file.
for staging_file in staging_contents:
for archive_input in g_archive_inputs:
archive_rel = path_fixup(archive_input)
if (os.path.basename(staging_file).lower() ==
os.path.basename(archive_rel).lower()):
break
else:
raise Exception('Did not find an archive input file for "%s"' %
staging_file)
# Finally, write the depfile referencing the inputs.
with open(options.depfile, 'wb') as f:
f.write(path_fixup(os.path.relpath(archive_file, options.build_dir)) +
': \\\n')
f.write(' ' + ' \\\n '.join(path_fixup(x) for x in g_archive_inputs))
cmd = [lzma_exec,
'a',
'-t7z',
archive_file,
os.path.join(staging_dir, CHROME_DIR),
'-mx0',]
  # There doesn't seem to be any way in 7za.exe to overwrite an existing file,
  # so we always delete before creating a new one.
if not os.path.exists(archive_file):
RunSystemCommand(cmd, options.verbose)
elif options.skip_rebuild_archive != "true":
os.remove(archive_file)
RunSystemCommand(cmd, options.verbose)
# Do not compress the archive in developer (component) builds.
if options.component_build == '1':
compressed_file = os.path.join(
options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
if os.path.exists(compressed_file):
os.remove(compressed_file)
return os.path.basename(archive_file)
# If we are generating a patch, run bsdiff against previous build and
# compress the resulting patch file. If this is not a patch just compress the
# uncompressed archive file.
patch_name_prefix = options.output_name + CHROME_PATCH_FILE_SUFFIX
if options.last_chrome_installer:
prev_archive_file = os.path.join(options.last_chrome_installer,
options.output_name + ARCHIVE_SUFFIX)
patch_file = os.path.join(options.build_dir, patch_name_prefix +
PATCH_FILE_EXT)
GenerateDiffPatch(options, prev_archive_file, archive_file, patch_file)
compressed_archive_file = patch_name_prefix + '_' + \
current_version + '_from_' + prev_version + \
COMPRESSED_FILE_EXT
orig_file = patch_file
else:
compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
orig_file = archive_file
compressed_archive_file_path = os.path.join(options.output_dir,
compressed_archive_file)
CompressUsingLZMA(options.build_dir, compressed_archive_file_path, orig_file,
options.verbose)
return compressed_archive_file
def PrepareSetupExec(options, current_version, prev_version):
"""Prepares setup.exe for bundling in mini_installer based on options."""
if options.setup_exe_format == "FULL":
setup_file = SETUP_EXEC
elif options.setup_exe_format == "DIFF":
if not options.last_chrome_installer:
raise Exception(
"To use DIFF for setup.exe, --last_chrome_installer is needed.")
prev_setup_file = os.path.join(options.last_chrome_installer, SETUP_EXEC)
new_setup_file = os.path.join(options.build_dir, SETUP_EXEC)
patch_file = os.path.join(options.build_dir, SETUP_PATCH_FILE_PREFIX +
PATCH_FILE_EXT)
GenerateDiffPatch(options, prev_setup_file, new_setup_file, patch_file)
setup_file = SETUP_PATCH_FILE_PREFIX + '_' + current_version + \
'_from_' + prev_version + COMPRESSED_FILE_EXT
setup_file_path = os.path.join(options.build_dir, setup_file)
CompressUsingLZMA(options.build_dir, setup_file_path, patch_file,
options.verbose)
else:
cmd = ['makecab.exe',
'/D', 'CompressionType=LZX',
'/V1',
'/L', options.output_dir,
os.path.join(options.build_dir, SETUP_EXEC),]
RunSystemCommand(cmd, options.verbose)
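    # makecab's default output name replaces the last character of the input
    # file name with an underscore, so "setup.exe" is emitted as "setup.ex_";
    # the line below mirrors that naming.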
setup_file = SETUP_EXEC[:-1] + "_"
return setup_file
_RESOURCE_FILE_HEADER = """\
// This file is automatically generated by create_installer_archive.py.
// It contains the resource entries that are going to be linked inside
// mini_installer.exe. For each file to be linked there should be two
// lines:
// - The first line contains the output filename (without path) and the
// type of the resource ('BN' - not compressed , 'BL' - LZ compressed,
// 'B7' - LZMA compressed)
// - The second line contains the path to the input file. Uses '/' to
// separate path components.
"""
def CreateResourceInputFile(
output_dir, setup_format, archive_file, setup_file, resource_file_path,
component_build, staging_dir, current_version):
"""Creates resource input file (packed_files.txt) for mini_installer project.
  This method checks the format of setup.exe being used and accordingly sets
  its resource type.
"""
setup_resource_type = "BL"
if (setup_format == "FULL"):
setup_resource_type = "BN"
elif (setup_format == "DIFF"):
setup_resource_type = "B7"
# An array of (file, type, path) tuples of the files to be included.
resources = []
resources.append((setup_file, setup_resource_type,
os.path.join(output_dir, setup_file)))
resources.append((archive_file, 'B7',
os.path.join(output_dir, archive_file)))
# Include all files needed to run setup.exe (these are copied into the
# 'Installer' dir by DoComponentBuildTasks).
if component_build:
installer_dir = os.path.join(staging_dir, CHROME_DIR, current_version,
'Installer')
for file in os.listdir(installer_dir):
resources.append((file, 'BN', os.path.join(installer_dir, file)))
with open(resource_file_path, 'w') as f:
f.write(_RESOURCE_FILE_HEADER)
for (file, type, path) in resources:
f.write('\n%s %s\n "%s"\n' % (file, type, path.replace("\\","/")))
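# Example (hypothetical output directory) of the entries written to
# packed_files.txt when setup_format is the default 'COMPRESSED':
#
#   setup.ex_ BL
#    "C:/src/out/Release/setup.ex_"
#
#   chrome.packed.7z B7
#    "C:/src/out/Release/chrome.packed.7z"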
# Reads |manifest_name| from |build_dir| and writes |manifest_name| to
# |output_dir| with the same content plus |inserted_string| added just before
# |insert_before|.
def CopyAndAugmentManifest(build_dir, output_dir, manifest_name,
inserted_string, insert_before):
with open(os.path.join(build_dir, manifest_name), 'r') as f:
manifest_lines = f.readlines()
insert_line = -1
insert_pos = -1
for i in xrange(len(manifest_lines)):
insert_pos = manifest_lines[i].find(insert_before)
if insert_pos != -1:
insert_line = i
break
if insert_line == -1:
raise ValueError('Could not find {0} in the manifest:\n{1}'.format(
insert_before, ''.join(manifest_lines)))
old = manifest_lines[insert_line]
manifest_lines[insert_line] = (old[:insert_pos] + '\n' + inserted_string +
'\n' + old[insert_pos:])
with open(os.path.join(output_dir, manifest_name), 'w') as f :
f.write(''.join(manifest_lines))
def CopyIfChanged(src, target_dir):
"""Copy specified |src| file to |target_dir|, but only write to target if
the file has changed. This avoids a problem during packaging where parts of
the build have not completed and have the runtime DLL locked when we try to
copy over it. See http://crbug.com/305877 for details."""
assert os.path.isdir(target_dir)
dest = os.path.join(target_dir, os.path.basename(src))
g_archive_inputs.append(src)
if os.path.exists(dest):
# We assume the files are OK to buffer fully into memory since we know
# they're only 1-2M.
with open(src, 'rb') as fsrc:
src_data = fsrc.read()
with open(dest, 'rb') as fdest:
dest_data = fdest.read()
if src_data != dest_data:
# This may still raise if we get here, but this really should almost
# never happen (it would mean that the contents of e.g. msvcr100d.dll
# had been changed).
shutil.copyfile(src, dest)
else:
shutil.copyfile(src, dest)
# Taken and modified from:
# third_party\WebKit\Tools\Scripts\webkitpy\layout_tests\port\factory.py
def _read_configuration_from_gn(build_dir):
"""Return the configuration to used based on args.gn, if possible."""
path = os.path.join(build_dir, 'args.gn')
if not os.path.exists(path):
path = os.path.join(build_dir, 'toolchain.ninja')
if not os.path.exists(path):
# This does not appear to be a GN-based build directory, so we don't
# know how to interpret it.
return None
# toolchain.ninja exists, but args.gn does not; this can happen when
# `gn gen` is run with no --args.
return 'Debug'
args = open(path).read()
for l in args.splitlines():
# See the original of this function and then gn documentation for why this
# regular expression is correct:
# https://chromium.googlesource.com/chromium/src/+/master/tools/gn/docs/reference.md#GN-build-language-grammar
m = re.match('^\s*is_debug\s*=\s*false(\s*$|\s*#.*$)', l)
if m:
return 'Release'
# if is_debug is set to anything other than false, or if it
# does not exist at all, we should use the default value (True).
return 'Debug'
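# Illustrative behaviour of the is_debug match above (sample args.gn lines):
#   'is_debug = false'             -> 'Release'
#   'is_debug = false  # comment'  -> 'Release'
#   'is_debug = true' or not set   -> 'Debug'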
# Copy the relevant CRT DLLs to |build_dir|. We copy DLLs from all installed
# versions of VS to make sure we have the correct CRT version; unused DLLs
# should not conflict with the others anyway.
def CopyVisualStudioRuntimeDLLs(target_arch, build_dir):
is_debug = os.path.basename(build_dir).startswith('Debug')
if not is_debug and not os.path.basename(build_dir).startswith('Release'):
gn_type = _read_configuration_from_gn(build_dir)
if gn_type == 'Debug':
is_debug = True
elif gn_type == 'Release':
is_debug = False
else:
print ("Warning: could not determine build configuration from "
"output directory or args.gn, assuming Release build. If "
"setup.exe fails to launch, please check that your build "
"configuration is Release.")
crt_dlls = []
sys_dll_dir = None
if is_debug:
crt_dlls = glob.glob(
"C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/"
"Debug_NonRedist/" + target_arch + "/Microsoft.*.DebugCRT/*.dll")
else:
crt_dlls = glob.glob(
"C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/" +
target_arch + "/Microsoft.*.CRT/*.dll")
# Also handle the case where someone is building using only winsdk and
# doesn't have Visual Studio installed.
if not crt_dlls:
if target_arch == 'x64':
      # Check that we are on a 64-bit system by the existence of the WOW64 dir.
if os.access("C:/Windows/SysWOW64", os.F_OK):
sys_dll_dir = "C:/Windows/System32"
else:
        # We only support packaging a 64-bit installer on a 64-bit system,
        # but this is just as bad as not finding DLLs at all, so we don't
        # abort here to mirror the behavior below.
print ("Warning: could not find x64 CRT DLLs on x86 system.")
else:
# On a 64-bit system, 32-bit dlls are in SysWOW64 (don't ask).
if os.access("C:/Windows/SysWOW64", os.F_OK):
sys_dll_dir = "C:/Windows/SysWOW64"
else:
sys_dll_dir = "C:/Windows/System32"
if sys_dll_dir is not None:
if is_debug:
crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0d.dll"))
else:
crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0.dll"))
if not crt_dlls:
print ("Warning: could not find CRT DLLs to copy to build dir - target "
"may not run on a system that doesn't have those DLLs.")
for dll in crt_dlls:
CopyIfChanged(dll, build_dir)
# Copies component build DLLs and generates required config files and manifests
# in order for chrome.exe and setup.exe to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, target_arch, current_version):
# Get the required directories for the upcoming operations.
chrome_dir = os.path.join(staging_dir, CHROME_DIR)
version_dir = os.path.join(chrome_dir, current_version)
installer_dir = os.path.join(version_dir, 'Installer')
# |installer_dir| is technically only created post-install, but we need it
# now to add setup.exe's config and manifest to the archive.
if not os.path.exists(installer_dir):
os.mkdir(installer_dir)
# Copy the VS CRT DLLs to |build_dir|. This must be done before the general
# copy step below to ensure the CRT DLLs are added to the archive and marked
# as a dependency in the exe manifests generated below.
CopyVisualStudioRuntimeDLLs(target_arch, build_dir)
# Explicitly list the component DLLs setup.exe depends on (this list may
# contain wildcards). These will be copied to |installer_dir| in the archive.
# The use of source sets in gn builds means that references to some extra
# DLLs get pulled in to setup.exe (base_i18n.dll, ipc.dll, etc.). Unpacking
# these to |installer_dir| is simpler and more robust than switching setup.exe
# to use libraries instead of source sets.
setup_component_dll_globs = [ 'api-ms-win-*.dll',
'base.dll',
'boringssl.dll',
'crcrypto.dll',
'icui18n.dll',
'icuuc.dll',
'msvc*.dll',
'vcruntime*.dll',
# DLLs needed due to source sets.
'base_i18n.dll',
'ipc.dll',
'net.dll',
'prefs.dll',
'protobuf_lite.dll',
'url_lib.dll' ]
for setup_component_dll_glob in setup_component_dll_globs:
setup_component_dlls = glob.glob(os.path.join(build_dir,
setup_component_dll_glob))
if len(setup_component_dlls) == 0:
raise Exception('Error: missing expected DLL for component build '
'mini_installer: "%s"' % setup_component_dll_glob)
for setup_component_dll in setup_component_dlls:
g_archive_inputs.append(setup_component_dll)
shutil.copy(setup_component_dll, installer_dir)
# Stage all the component DLLs found in |build_dir| to the |version_dir| (for
# the version assembly to be able to refer to them below and make sure
# chrome.exe can find them at runtime). The component DLLs are considered to
# be all the DLLs which have not already been added to the |version_dir| by
# virtue of chrome.release.
build_dlls = glob.glob(os.path.join(build_dir, '*.dll'))
staged_dll_basenames = [os.path.basename(staged_dll) for staged_dll in \
glob.glob(os.path.join(version_dir, '*.dll'))]
component_dll_filenames = []
for component_dll in [dll for dll in build_dlls if \
os.path.basename(dll) not in staged_dll_basenames]:
component_dll_name = os.path.basename(component_dll)
    # ash*.dll and remoting_*.dll don't belong in the archive (the installer
    # doesn't depend on them in gyp). Trying to copy them causes a build race
    # when creating the installer archive in component mode. See:
    # crbug.com/180996 and crbug.com/586967
if (component_dll_name.startswith('remoting_') or
component_dll_name.startswith('ash')):
continue
component_dll_filenames.append(component_dll_name)
g_archive_inputs.append(component_dll)
shutil.copy(component_dll, version_dir)
# Augment {version}.manifest to include all component DLLs as part of the
# assembly it constitutes, which will allow dependents of this assembly to
# find these DLLs.
version_assembly_dll_additions = []
for dll_filename in component_dll_filenames:
version_assembly_dll_additions.append(" <file name='%s'/>" % dll_filename)
CopyAndAugmentManifest(build_dir, version_dir,
'%s.manifest' % current_version,
'\n'.join(version_assembly_dll_additions),
'</assembly>')
def main(options):
"""Main method that reads input file, creates archive file and write
resource input file.
"""
current_version = BuildVersion(options.build_dir)
config = Readconfig(options.input_file, current_version)
(staging_dir, temp_dir) = MakeStagingDirectories(options.staging_dir)
prev_version = GetPrevVersion(options.build_dir, temp_dir,
options.last_chrome_installer,
options.output_name)
# Preferentially copy the files we can find from the output_dir, as
# this is where we'll find the Syzygy-optimized executables when
# building the optimized mini_installer.
if options.build_dir != options.output_dir:
CopyAllFilesToStagingDir(config, options.distribution,
staging_dir, options.output_dir,
options.enable_hidpi)
# Now copy the remainder of the files from the build dir.
CopyAllFilesToStagingDir(config, options.distribution,
staging_dir, options.build_dir,
options.enable_hidpi)
if options.component_build == '1':
DoComponentBuildTasks(staging_dir, options.build_dir,
options.target_arch, current_version)
version_numbers = current_version.split('.')
current_build_number = version_numbers[2] + '.' + version_numbers[3]
prev_build_number = ''
if prev_version:
version_numbers = prev_version.split('.')
prev_build_number = version_numbers[2] + '.' + version_numbers[3]
  # Name of the archive file built (for example - chrome.7z or
  # patch-<old_version>-<new_version>.7z or patch-<new_version>.7z).
archive_file = CreateArchiveFile(options, staging_dir,
current_build_number, prev_build_number)
setup_file = PrepareSetupExec(options,
current_build_number, prev_build_number)
CreateResourceInputFile(options.output_dir, options.setup_exe_format,
archive_file, setup_file, options.resource_file_path,
options.component_build == '1', staging_dir,
current_version)
def _ParseOptions():
parser = optparse.OptionParser()
parser.add_option('-i', '--input_file',
help='Input file describing which files to archive.')
parser.add_option('-b', '--build_dir',
help='Build directory. The paths in input_file are relative to this.')
parser.add_option('--staging_dir',
help='Staging directory where intermediate files and directories '
'will be created')
parser.add_option('-o', '--output_dir',
help='The output directory where the archives will be written. '
'Defaults to the build_dir.')
parser.add_option('--resource_file_path',
help='The path where the resource file will be output. '
'Defaults to %s in the build directory.' %
MINI_INSTALLER_INPUT_FILE)
parser.add_option('-d', '--distribution',
help='Name of Chromium Distribution. Optional.')
parser.add_option('-s', '--skip_rebuild_archive',
default="False", help='Skip re-building Chrome.7z archive if it exists.')
parser.add_option('-l', '--last_chrome_installer',
help='Generate differential installer. The value of this parameter '
'specifies the directory that contains base versions of '
'setup.exe, courgette.exe (if --diff_algorithm is COURGETTE) '
'& chrome.7z.')
parser.add_option('-f', '--setup_exe_format', default='COMPRESSED',
help='How setup.exe should be included {COMPRESSED|DIFF|FULL}.')
parser.add_option('-a', '--diff_algorithm', default='BSDIFF',
help='Diff algorithm to use when generating differential patches '
'{BSDIFF|COURGETTE}.')
parser.add_option('-n', '--output_name', default='chrome',
help='Name used to prefix names of generated archives.')
parser.add_option('--enable_hidpi', default='0',
help='Whether to include HiDPI resource files.')
parser.add_option('--component_build', default='0',
help='Whether this archive is packaging a component build. This will '
'also turn off compression of chrome.7z into chrome.packed.7z and '
'helpfully delete any old chrome.packed.7z in |output_dir|.')
parser.add_option('--depfile',
help='Generate a depfile with the given name listing the implicit inputs '
'to the archive process that can be used with a build system.')
parser.add_option('--target_arch', default='x86',
help='Specify the target architecture for installer - this is used '
'to determine which CRT runtime files to pull and package '
'with the installer archive {x86|x64}.')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False)
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('You must provide a build dir.')
options.build_dir = os.path.normpath(options.build_dir)
if not options.staging_dir:
parser.error('You must provide a staging dir.')
if not options.input_file:
parser.error('You must provide an input file')
if not options.output_dir:
options.output_dir = options.build_dir
if not options.resource_file_path:
options.resource_file_path = os.path.join(options.build_dir,
MINI_INSTALLER_INPUT_FILE)
return options
if '__main__' == __name__:
options = _ParseOptions()
if options.verbose:
print sys.argv
sys.exit(main(options))
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import random
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
from ironic.common import exception
from ironic.common import image_service
from ironic.common import keystone
CONF = cfg.CONF
_GLANCE_API_SERVER = None
""" iterator that cycles (indefinitely) over glance API servers. """
def _extract_attributes(image):
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'checksum', 'id',
'name', 'created_at', 'updated_at',
'deleted_at', 'deleted', 'status',
'min_disk', 'min_ram', 'is_public']
IMAGE_ATTRIBUTES_V2 = ['tags', 'visibility', 'protected',
'file', 'schema']
output = {}
for attr in IMAGE_ATTRIBUTES:
output[attr] = getattr(image, attr, None)
output['properties'] = getattr(image, 'properties', {})
if hasattr(image, 'schema') and 'v2' in image['schema']:
IMAGE_ATTRIBUTES = IMAGE_ATTRIBUTES + IMAGE_ATTRIBUTES_V2
for attr in IMAGE_ATTRIBUTES_V2:
output[attr] = getattr(image, attr, None)
output['schema'] = image['schema']
for image_property in set(image.keys()) - set(IMAGE_ATTRIBUTES):
output['properties'][image_property] = image[image_property]
return output
def _convert_timestamps_to_datetimes(image_meta):
"""Convert timestamps to datetime objects
Returns image metadata with timestamp fields converted to naive UTC
datetime objects.
"""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.normalize_time(
timeutils.parse_isotime(image_meta[attr]))
return image_meta
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
prop = properties[attr]
if isinstance(prop, six.string_types):
properties[attr] = jsonutils.loads(prop)
return metadata
def _get_api_server_iterator():
"""Return iterator over shuffled API servers.
Shuffle a list of CONF.glance.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning if
necessary.
If CONF.glance.glance_api_servers isn't set, we fall back to using this
as the server: CONF.glance.glance_host:CONF.glance.glance_port.
If CONF.glance.glance_host is also not set, fetch the endpoint from the
service catalog.
:returns: iterator that cycles (indefinitely) over shuffled glance API
servers.
"""
api_servers = []
if not CONF.glance.glance_api_servers and not CONF.glance.glance_host:
session = keystone.get_session('glance',
auth=keystone.get_auth('glance'))
api_servers = [keystone.get_service_url(session, service_type='image',
endpoint_type='public')]
else:
configured_servers = (CONF.glance.glance_api_servers or
['%s:%s' % (CONF.glance.glance_host,
CONF.glance.glance_port)])
for api_server in configured_servers:
if '//' not in api_server:
api_server = '%s://%s' % (CONF.glance.glance_protocol,
api_server)
api_servers.append(api_server)
random.shuffle(api_servers)
return itertools.cycle(api_servers)
def _get_api_server():
"""Return a Glance API server.
:returns: for an API server, the tuple (host-or-IP, port, use_ssl), where
use_ssl is True to use the 'https' scheme, and False to use 'http'.
"""
global _GLANCE_API_SERVER
if not _GLANCE_API_SERVER:
_GLANCE_API_SERVER = _get_api_server_iterator()
return six.next(_GLANCE_API_SERVER)
def parse_image_ref(image_href):
"""Parse an image href.
:param image_href: href of an image
:returns: a tuple (image ID, glance URL, whether to use SSL)
:raises ValueError: when input image href is invalid
"""
if '/' not in six.text_type(image_href):
endpoint = _get_api_server()
return (image_href, endpoint, endpoint.startswith('https'))
else:
try:
url = urlparse.urlparse(image_href)
if url.scheme == 'glance':
endpoint = _get_api_server()
image_id = image_href.split('/')[-1]
return (image_id, endpoint, endpoint.startswith('https'))
else:
glance_port = url.port or 80
glance_host = url.netloc.split(':', 1)[0]
image_id = url.path.split('/')[-1]
use_ssl = (url.scheme == 'https')
endpoint = '%s://%s:%s' % (url.scheme, glance_host,
glance_port)
return (image_id, endpoint, use_ssl)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
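# Minimal usage sketch (the host and UUID below are placeholders): parsing a
# plain HTTP href only exercises urlparse, so it does not require a configured
# Glance endpoint via _get_api_server().
def _example_parse_http_image_ref():
    href = 'http://glance.example.com:9292/v2/images/1be3f21c-18a9-4b54-9d09-a1d5c3f2a4b6'
    image_id, endpoint, use_ssl = parse_image_ref(href)
    # image_id -> '1be3f21c-18a9-4b54-9d09-a1d5c3f2a4b6'
    # endpoint -> 'http://glance.example.com:9292'
    # use_ssl  -> False
    return image_id, endpoint, use_ssl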
def translate_from_glance(image):
image_meta = _extract_attributes(image)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert(image_meta)
return image_meta
def is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
if image.is_public or context.is_admin:
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def is_glance_image(image_href):
if not isinstance(image_href, six.string_types):
return False
return (image_href.startswith('glance://') or
uuidutils.is_uuid_like(image_href))
def is_image_href_ordinary_file_name(image_href):
"""Check if image_href is a ordinary file name.
This method judges if image_href is a ordinary file name or not,
which is a file supposed to be stored in share file system.
The ordinary file name is neither glance image href
nor image service href.
:returns: True if image_href is ordinary file name, False otherwise.
"""
return not (is_glance_image(image_href) or
urlparse.urlparse(image_href).scheme.lower() in
image_service.protocol_mapping)
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locked file interface that should work on Unix and Windows pythons.
This module first tries to use fcntl locking to ensure serialized access
to a file, then falls back on a lock file if that is unavailable.
Usage::
f = LockedFile('filename', 'r+b', 'rb')
f.open_and_lock()
if f.is_locked():
print('Acquired filename with r+b mode')
f.file_handle().write('locked data')
else:
print('Acquired filename with rb mode')
f.unlock_and_close()
"""
from __future__ import print_function
import errno
import logging
import os
import time
from oauth2client import util
__author__ = '[email protected] (David T McWherter)'
logger = logging.getLogger(__name__)
class CredentialsFileSymbolicLinkError(Exception):
"""Credentials files must not be symbolic links."""
class AlreadyLockedException(Exception):
"""Trying to lock a file that has already been locked by the LockedFile."""
pass
def validate_file(filename):
if os.path.islink(filename):
raise CredentialsFileSymbolicLinkError(
'File: %s is a symbolic link.' % filename)
class _Opener(object):
"""Base class for different locking primitives."""
def __init__(self, filename, mode, fallback_mode):
"""Create an Opener.
Args:
filename: string, The pathname of the file.
mode: string, The preferred mode to access the file with.
fallback_mode: string, The mode to use if locking fails.
"""
self._locked = False
self._filename = filename
self._mode = mode
self._fallback_mode = fallback_mode
self._fh = None
self._lock_fd = None
def is_locked(self):
"""Was the file locked."""
return self._locked
def file_handle(self):
"""The file handle to the file. Valid only after opened."""
return self._fh
def filename(self):
"""The filename that is being locked."""
return self._filename
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
"""
pass
def unlock_and_close(self):
"""Unlock and close the file."""
pass
class _PosixOpener(_Opener):
"""Lock files using Posix advisory lock files."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Tries to create a .lock file next to the file we're trying to open.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError if the file is a symbolic link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
self._locked = False
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError as e:
# If we can't access with _mode, try _fallback_mode and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
lock_filename = self._posix_lockfile(self._filename)
start_time = time.time()
while True:
try:
self._lock_fd = os.open(lock_filename,
os.O_CREAT | os.O_EXCL | os.O_RDWR)
self._locked = True
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= timeout:
logger.warn('Could not acquire lock %s in %s seconds',
lock_filename, timeout)
# Close the file and open in fallback_mode.
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Unlock a file by removing the .lock file, and close the handle."""
if self._locked:
lock_filename = self._posix_lockfile(self._filename)
os.close(self._lock_fd)
os.unlink(lock_filename)
self._locked = False
self._lock_fd = None
if self._fh:
self._fh.close()
def _posix_lockfile(self, filename):
"""The name of the lock file to use for posix locking."""
return '%s.lock' % filename
try:
import fcntl
class _FcntlOpener(_Opener):
"""Open, lock, and unlock a file using fcntl.lockf."""
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError: if the file is a symbolic
link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError as e:
# If we can't access with _mode, try _fallback_mode and
# don't lock.
if e.errno in (errno.EPERM, errno.EACCES):
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
self._locked = True
return
except IOError as e:
# If not retrying, then just pass on the error.
if timeout == 0:
raise
if e.errno != errno.EACCES:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds',
self._filename, timeout)
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the fcntl.lockf primitive."""
if self._locked:
fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_FcntlOpener = None
try:
import pywintypes
import win32con
import win32file
class _Win32Opener(_Opener):
"""Open, lock, and unlock a file using windows primitives."""
# Error #33:
# 'The process cannot access the file because another process'
FILE_IN_USE_ERROR = 33
# Error #158:
# 'The segment is already unlocked.'
FILE_ALREADY_UNLOCKED_ERROR = 158
def open_and_lock(self, timeout, delay):
"""Open the file and lock it.
Args:
timeout: float, How long to try to lock for.
delay: float, How long to wait between retries
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
CredentialsFileSymbolicLinkError: if the file is a symbolic
link.
"""
if self._locked:
raise AlreadyLockedException('File %s is already locked' %
self._filename)
start_time = time.time()
validate_file(self._filename)
try:
self._fh = open(self._filename, self._mode)
except IOError as e:
# If we can't access with _mode, try _fallback_mode
# and don't lock.
if e.errno == errno.EACCES:
self._fh = open(self._filename, self._fallback_mode)
return
# We opened in _mode, try to lock the file.
while True:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.LockFileEx(
hfile,
(win32con.LOCKFILE_FAIL_IMMEDIATELY |
win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
pywintypes.OVERLAPPED())
self._locked = True
return
except pywintypes.error as e:
if timeout == 0:
raise
# If the error is not that the file is already
# in use, raise.
if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
raise
# We could not acquire the lock. Try again.
if (time.time() - start_time) >= timeout:
logger.warn('Could not lock %s in %s seconds' % (
self._filename, timeout))
if self._fh:
self._fh.close()
self._fh = open(self._filename, self._fallback_mode)
return
time.sleep(delay)
def unlock_and_close(self):
"""Close and unlock the file using the win32 primitive."""
if self._locked:
try:
hfile = win32file._get_osfhandle(self._fh.fileno())
win32file.UnlockFileEx(hfile, 0, -0x10000,
pywintypes.OVERLAPPED())
except pywintypes.error as e:
if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
raise
self._locked = False
if self._fh:
self._fh.close()
except ImportError:
_Win32Opener = None
class LockedFile(object):
"""Represent a file that has exclusive access."""
@util.positional(4)
def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
"""Construct a LockedFile.
Args:
filename: string, The path of the file to open.
mode: string, The mode to try to open the file with.
fallback_mode: string, The mode to use if locking fails.
use_native_locking: bool, Whether or not fcntl/win32 locking is
used.
"""
opener = None
if not opener and use_native_locking:
if _Win32Opener:
opener = _Win32Opener(filename, mode, fallback_mode)
if _FcntlOpener:
opener = _FcntlOpener(filename, mode, fallback_mode)
if not opener:
opener = _PosixOpener(filename, mode, fallback_mode)
self._opener = opener
def filename(self):
"""Return the filename we were constructed with."""
return self._opener._filename
def file_handle(self):
"""Return the file_handle to the opened file."""
return self._opener.file_handle()
def is_locked(self):
"""Return whether we successfully locked the file."""
return self._opener.is_locked()
def open_and_lock(self, timeout=0, delay=0.05):
"""Open the file, trying to lock it.
Args:
timeout: float, The number of seconds to try to acquire the lock.
delay: float, The number of seconds to wait between retry attempts.
Raises:
AlreadyLockedException: if the lock is already acquired.
IOError: if the open fails.
"""
self._opener.open_and_lock(timeout, delay)
def unlock_and_close(self):
"""Unlock and close a file."""
self._opener.unlock_and_close()
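# A short usage sketch (the file name is a placeholder); it mirrors the module
# docstring but adds an explicit timeout/delay and a try/finally around the
# locked region.
def _example_locked_file_usage():
    f = LockedFile('credentials.json', 'r+b', 'rb')
    f.open_and_lock(timeout=5, delay=0.1)
    try:
        if f.is_locked():
            # Exclusive access: safe to modify the file.
            f.file_handle().write(b'locked data')
        else:
            # Lock could not be acquired; the file was reopened read-only.
            print('fallback mode, read-only:', f.file_handle().read())
    finally:
        f.unlock_and_close()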
|
|
'''
Created on 8. des. 2017
@author: ljb
'''
from __future__ import division, absolute_import
from numba import jit, float64, void
import numpy as np
import matplotlib.pyplot as plt
import timeit
import scipy.linalg as linalg
from fys4150.project5.figure_saver import my_savefig
def fun(x):
return 100 * np.exp(-10 * x)
def u(x):
return 1 - (1 - np.exp(-10)) * x - np.exp(-10 * x)
@jit(void(float64[:], float64[:], float64[:], float64[:], float64[:], float64[:]))
def conda_tridiagonal_solve(x, temp, a, b, c, f):
'''
Solving a tridiagonal matrix equation [a, b, c]*x = f
'''
n = len(f)
# decomposition and forward substitution
btemp = b[0]
x[0] = f[0] / btemp
for i in xrange(1, n):
temp[i] = c[i - 1] / btemp
btemp = b[i] - a[i - 1] * temp[i]
x[i] = (f[i] - a[i - 1] * x[i - 1]) / btemp
# backwards substitution
for i in xrange(n - 2, -1, -1):
x[i] -= temp[i + 1] * x[i + 1]
def tridiagonal_solve(a, b, c, f):
'''
Solving a tridiagonal matrix equation [a, b, c]*x = f
'''
a, b, c, f = np.atleast_1d(a, b, c, f)
n = len(f)
x = np.zeros(n)
temp = np.zeros(n)
conda_tridiagonal_solve(x, temp, a, b, c, f)
return x
def tridiagonal_solve_periodic(a, b, c, f):
'''
Solving a periodic tridiagonal matrix equation
A * x = f
where
diag(A) = [b0, b1, b2, ...., bn-1]
diag(A, -1) = [a0, a1, a2, ...., an-2]
diag(A, 1) = [c0, c1, c2, ...., cn-2]
A[0, n-1] = an-1 (periodic boundary conditions)
A[n-1, 0] = cn-1 (periodic boundary conditions)
Reference
---------
https://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
'''
a, b, c, f = np.atleast_1d(a, b, c, f)
n = len(f)
y = np.zeros(n)
q = np.zeros(n)
temp = np.zeros(n)
b1 = np.copy(b)
b0 = b1[0]
cn1 = c[n - 1]
an1 = a[n - 1]
b1[0] = b0 * 2
b1[n - 1] += cn1 * an1 / b0
u = np.zeros(n)
u[0] = -b0
u[-1] = cn1
v = np.zeros(n)
v[0] = 1
v[-1] = - an1 / b0
conda_tridiagonal_solve(y, temp, a, b1, c, f)
conda_tridiagonal_solve(q, temp, a, b1, c, u)
numerator = np.dot(v, y)
denominator = (1 + np.dot(v, q))
    if denominator == 0:
        scale = 1.49
    else:
        scale = numerator / denominator
return y - q * scale
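def check_periodic_solver_example(n=6):
    '''
    Illustrative check (values chosen here, not part of the assignment): build
    the dense matrix described in the docstring of tridiagonal_solve_periodic
    and compare the fast solver against numpy.linalg.solve. The diagonal is 4
    so that the periodic test matrix is non-singular.
    '''
    a = -np.ones(n)           # sub-diagonal; a[n-1] becomes A[0, n-1]
    b = 4.0 * np.ones(n)      # main diagonal
    c = -np.ones(n)           # super-diagonal; c[n-1] becomes A[n-1, 0]
    f = np.arange(1.0, n + 1)
    A = np.diag(b) + np.diag(a[:-1], -1) + np.diag(c[:-1], 1)
    A[0, -1] = a[-1]
    A[-1, 0] = c[-1]
    x_dense = np.linalg.solve(A, f)
    x_fast = tridiagonal_solve_periodic(a, b, c, f)
    return np.allclose(x_dense, x_fast)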
def tridiagonal_solve_specific_periodic(f):
'''
Solving a periodic tridiagonal matrix equation
A * x = f
where
diag(A) = [2,2,2, ...., 2]
diag(A, -1) = diag(A, 1) =[-1, -1, -1, ...., -1]
A[0, n-1] = A[n-1, 0] = -1 (periodic boundary conditions)
Reference
---------
https://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
'''
f = np.atleast_1d(f)
n = len(f)
a = -np.ones(n)
b = 2 * np.ones(n)
c = -np.ones(n)
return tridiagonal_solve_periodic(a, b, c, f)
def tridiagonal_solve_specific(b):
'''
Solving a tridiagonal matrix equation [-1, 2, -1]*x = b
'''
b = np.atleast_1d(b)
n = len(b)
x = np.zeros(n)
temp = np.zeros(n)
conda_tridiagonal_solve_spesific(x, temp, b)
return x
@jit(void(float64[:], float64[:], float64[:]))
def conda_tridiagonal_solve_spesific(x, temp, b):
'''
Solving a tridiagonal matrix equation [-1, 2, -1]*x = b
'''
n = len(b)
# decomposition and forward substitution
btemp = 2.0
x[0] = b[0] / btemp
for i in xrange(1, n):
temp[i] = -1.0 / btemp
btemp = 2.0 + temp[i]
x[i] = (b[i] + x[i - 1]) / btemp
# backwards substitution
for i in xrange(n - 2, -1, -1):
x[i] -= temp[i + 1] * x[i + 1]
def cpu_time(repetition=10, n=10**6):
'''
Grid n =10^6 and two repetitions gave an average of 7.62825565887 seconds.
'''
t = timeit.timeit('solve_poisson_general({})'.format(n),
setup='from __main__ import solve_poisson_general',
number=repetition) / repetition
print(t)
return t
def cpu_time_specific(repetition=10, n=10**6):
'''
Grid n =10^6 and two repetitions gave an average of 7.512299363 seconds.
'''
t = timeit.timeit('solve_poisson_specific({})'.format(n),
setup='from __main__ import solve_poisson_specific',
number=repetition) / repetition
print(t)
return t
def cpu_time_lu_solve(repetition=10, n=10):
'''
Grid n =10^6 and two repetitions gave an average of 7.512299363 seconds.
'''
t = timeit.timeit('solve_poisson_with_lu({})'.format(n),
setup='from __main__ import solve_poisson_with_lu',
number=repetition) / repetition
print(t)
return t
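# Discretisation used by the solvers below: with h = 1/(n+1) and x_i = i*h,
# the boundary value problem -u''(x) = f(x), u(0) = u(1) = 0, becomes the
# tridiagonal system (-v_{i-1} + 2*v_i - v_{i+1}) = h**2 * f(x_i).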
def solve_poisson_general(n):
h = 1.0 / (n + 1) # step_length
x = np.arange(1, n + 1) * h
fx = fun(x)
a = -np.ones(n)
b = 2 * np.ones(n)
c = -np.ones(n)
v = tridiagonal_solve(a, b, c, fx * h ** 2)
return x, v
def solve_poisson_specific(n):
h = 1.0 / (n + 1) # step_length
x = np.arange(1, n + 1) * h
fx = fun(x)
v = tridiagonal_solve_specific(fx * h ** 2)
return x, v
def solve_specific_periodic_with_lu(f):
f = np.atleast_1d(f)
n = len(f)
a = -np.ones(n - 1)
b = 2 * np.ones(n)
c = -np.ones(n - 1)
A = np.diag(a, -1) + np.diag(b, 0) + np.diag(c, 1)
A[0, -1] -= 1
A[-1, 0] -= 1
v = lu_solve(A, f)
return v
def solve_poisson_with_lu(n):
h = 1.0 / (n + 1) # step_length
x = np.arange(1, n + 1) * h
fx = fun(x)
a = -np.ones(n - 1)
b = 2 * np.ones(n)
c = -np.ones(n - 1)
A = np.diag(a, -1) + np.diag(b, 0) + np.diag(c, 1)
v = lu_solve(A, fx * h ** 2)
return x, v
def tri_solve_test_compare():
n = 10
for i, n in enumerate([10, 100, 1000]):
x, v = solve_poisson_specific(n)
plt.figure(i)
plt.plot(x, v, '.', label='numerical')
plt.plot(x, u(x), label='exact')
plt.title('n={}'.format(n))
plt.legend()
filename = "task1b_n{}.png".format(n)
my_savefig(filename)
def error_test():
max_error = []
problem_sizes = np.array([10, 100, 1000, 10000, 100000, 1000000, 10000000])
for n in problem_sizes:
x, v = solve_poisson_specific(n)
ui = u(x)
max_error.append(np.log10(max(np.abs((v - ui) / ui))))
h = 1 / (problem_sizes + 1)
plt.plot(np.log10(h), max_error)
plt.title('Relative error')
plt.xlabel('log10(h)')
plt.ylabel('log10(relative error)')
filename = 'error.png'
my_savefig(filename)
def lu_solve(A, b):
'''
Solves A*x= b
'''
A, b = np.atleast_1d(A, b)
lu_and_pivot = linalg.lu_factor(A)
x = linalg.lu_solve(lu_and_pivot, b)
return x
def lu_test_compare():
n = 10
for i, n in enumerate([10, 100, 1000]):
x, v = solve_poisson_with_lu(n)
plt.figure(i)
plt.plot(x, v, '.', label='numerical')
plt.plot(x, u(x), label='exact')
plt.title('n={}'.format(n))
plt.legend()
def plot_run_times(sizes=(10, 100, 1000, 2000)):
times = np.zeros((len(sizes), 3))
for i, n in enumerate(sizes):
times[i, 0] = cpu_time_specific(10, n)
times[i, 1] = cpu_time(10, n)
times[i, 2] = cpu_time_lu_solve(10, n)
for i, name in enumerate(['tri_spec', 'tri', 'lu']):
plt.loglog(sizes, times[:, i], label=name)
plt.legend()
if __name__ == '__main__':
# cpu_time(2)
# cpu_time_specific(2)
# tri_solve_test_compare()
# solve_poisson_with_lu(10)
# error_test()
# lu_test_compare()
# plot_run_times(sizes)
plt.show()
|
|
# -*- encoding: utf-8 *-*
import os
import io
import re
import sys
from collections import OrderedDict
from datetime import datetime
from glob import glob
from distutils.command.build import build
from distutils.core import Command
import textwrap
min_python = (3, 4)
my_python = sys.version_info
if my_python < min_python:
print("Borg requires Python %d.%d or later" % min_python)
sys.exit(1)
# Are we building on ReadTheDocs?
on_rtd = os.environ.get('READTHEDOCS')
# msgpack pure python data corruption was fixed in 0.4.6.
# Also, we might use some rather recent API features.
install_requires = ['msgpack-python>=0.4.6', ]
# note for package maintainers: if you package borgbackup for distribution,
# please add llfuse as a *requirement* on all platforms that have a working
# llfuse package. "borg mount" needs llfuse to work.
# if you do not have llfuse, do not require it, most of borgbackup will work.
extras_require = {
# llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0
# llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 1.1.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
# llfuse 2.0 will break API
'fuse': ['llfuse<2.0', ],
}
if sys.platform.startswith('freebsd'):
# llfuse was frequently broken / did not build on freebsd
# llfuse 0.41.1, 1.1 are ok
extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ]
from setuptools import setup, find_packages, Extension
from setuptools.command.sdist import sdist
from distutils.command.clean import clean
compress_source = 'src/borg/compress.pyx'
crypto_ll_source = 'src/borg/crypto/low_level.pyx'
chunker_source = 'src/borg/chunker.pyx'
hashindex_source = 'src/borg/hashindex.pyx'
item_source = 'src/borg/item.pyx'
checksums_source = 'src/borg/algorithms/checksums.pyx'
platform_posix_source = 'src/borg/platform/posix.pyx'
platform_linux_source = 'src/borg/platform/linux.pyx'
platform_darwin_source = 'src/borg/platform/darwin.pyx'
platform_freebsd_source = 'src/borg/platform/freebsd.pyx'
cython_sources = [
compress_source,
crypto_ll_source,
chunker_source,
hashindex_source,
item_source,
checksums_source,
platform_posix_source,
platform_linux_source,
platform_freebsd_source,
platform_darwin_source,
]
try:
from Cython.Distutils import build_ext
import Cython.Compiler.Main as cython_compiler
class Sdist(sdist):
def __init__(self, *args, **kwargs):
for src in cython_sources:
cython_compiler.compile(src, cython_compiler.default_options)
super().__init__(*args, **kwargs)
def make_distribution(self):
self.filelist.extend([
'src/borg/compress.c',
'src/borg/crypto/low_level.c',
'src/borg/chunker.c', 'src/borg/_chunker.c',
'src/borg/hashindex.c', 'src/borg/_hashindex.c',
'src/borg/cache_sync/cache_sync.c', 'src/borg/cache_sync/sysdep.h', 'src/borg/cache_sync/unpack.h',
'src/borg/cache_sync/unpack_define.h', 'src/borg/cache_sync/unpack_template.h',
'src/borg/item.c',
'src/borg/algorithms/checksums.c',
'src/borg/algorithms/crc32_dispatch.c', 'src/borg/algorithms/crc32_clmul.c', 'src/borg/algorithms/crc32_slice_by_8.c',
'src/borg/algorithms/xxh64/xxhash.h', 'src/borg/algorithms/xxh64/xxhash.c',
'src/borg/platform/posix.c',
'src/borg/platform/linux.c',
'src/borg/platform/freebsd.c',
'src/borg/platform/darwin.c',
])
super().make_distribution()
except ImportError:
class Sdist(sdist):
def __init__(self, *args, **kwargs):
raise Exception('Cython is required to run sdist')
compress_source = compress_source.replace('.pyx', '.c')
crypto_ll_source = crypto_ll_source.replace('.pyx', '.c')
chunker_source = chunker_source.replace('.pyx', '.c')
hashindex_source = hashindex_source.replace('.pyx', '.c')
item_source = item_source.replace('.pyx', '.c')
checksums_source = checksums_source.replace('.pyx', '.c')
platform_posix_source = platform_posix_source.replace('.pyx', '.c')
platform_linux_source = platform_linux_source.replace('.pyx', '.c')
platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')
platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')
from distutils.command.build_ext import build_ext
if not on_rtd and not all(os.path.exists(path) for path in [
compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source,
platform_posix_source, platform_linux_source, platform_freebsd_source, platform_darwin_source]):
raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')
def detect_openssl(prefixes):
for prefix in prefixes:
filename = os.path.join(prefix, 'include', 'openssl', 'evp.h')
if os.path.exists(filename):
with open(filename, 'r') as fd:
if 'PKCS5_PBKDF2_HMAC(' in fd.read():
return prefix
def detect_lz4(prefixes):
for prefix in prefixes:
filename = os.path.join(prefix, 'include', 'lz4.h')
if os.path.exists(filename):
with open(filename, 'r') as fd:
if 'LZ4_decompress_safe' in fd.read():
return prefix
def detect_libb2(prefixes):
for prefix in prefixes:
filename = os.path.join(prefix, 'include', 'blake2.h')
if os.path.exists(filename):
with open(filename, 'r') as fd:
if 'blake2b_init' in fd.read():
return prefix
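# Example (illustrative): with BORG_OPENSSL_PREFIX=/opt/openssl set in the
# environment, detection below checks /opt/openssl/include/openssl/evp.h
# before falling back to the standard prefixes.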
include_dirs = []
library_dirs = []
define_macros = []
crypto_libraries = ['crypto']
possible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_OPENSSL_PREFIX'):
possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX'))
ssl_prefix = detect_openssl(possible_openssl_prefixes)
if not ssl_prefix:
raise Exception('Unable to find OpenSSL >= 1.0 headers. (Looked here: {})'.format(', '.join(possible_openssl_prefixes)))
include_dirs.append(os.path.join(ssl_prefix, 'include'))
library_dirs.append(os.path.join(ssl_prefix, 'lib'))
possible_lz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_LZ4_PREFIX'):
possible_lz4_prefixes.insert(0, os.environ.get('BORG_LZ4_PREFIX'))
lz4_prefix = detect_lz4(possible_lz4_prefixes)
if lz4_prefix:
include_dirs.append(os.path.join(lz4_prefix, 'include'))
library_dirs.append(os.path.join(lz4_prefix, 'lib'))
elif not on_rtd:
raise Exception('Unable to find LZ4 headers. (Looked here: {})'.format(', '.join(possible_lz4_prefixes)))
possible_libb2_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libb2', '/usr/local/libb2',
'/usr/local/borg', '/opt/local', '/opt/pkg', ]
if os.environ.get('BORG_LIBB2_PREFIX'):
possible_libb2_prefixes.insert(0, os.environ.get('BORG_LIBB2_PREFIX'))
libb2_prefix = detect_libb2(possible_libb2_prefixes)
if libb2_prefix:
print('Detected and preferring libb2 over bundled BLAKE2')
include_dirs.append(os.path.join(libb2_prefix, 'include'))
library_dirs.append(os.path.join(libb2_prefix, 'lib'))
crypto_libraries.append('b2')
define_macros.append(('BORG_USE_LIBB2', 'YES'))
with open('README.rst', 'r') as fd:
long_description = fd.read()
# remove badges
long_description = re.compile(r'^\.\. start-badges.*^\.\. end-badges', re.M | re.S).sub('', long_description)
# remove |substitutions|
long_description = re.compile(r'\|screencast\|').sub('', long_description)
# remove unknown directives
long_description = re.compile(r'^\.\. highlight:: \w+$', re.M).sub('', long_description)
class build_usage(Command):
description = "generate usage for each command"
user_options = [
('output=', 'O', 'output directory'),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print('generating usage docs')
import borg
borg.doc_mode = 'build_man'
if not os.path.exists('docs/usage'):
os.mkdir('docs/usage')
# allows us to build docs without the C modules fully loaded during help generation
from borg.archiver import Archiver
parser = Archiver(prog='borg').build_parser()
self.generate_level("", parser, Archiver)
def generate_level(self, prefix, parser, Archiver):
is_subcommand = False
choices = {}
for action in parser._actions:
if action.choices is not None and 'SubParsersAction' in str(action.__class__):
is_subcommand = True
for cmd, parser in action.choices.items():
choices[prefix + cmd] = parser
if prefix and not choices:
return
print('found commands: %s' % list(choices.keys()))
for command, parser in sorted(choices.items()):
if command.startswith('debug'):
print('skipping', command)
continue
print('generating help for %s' % command)
if self.generate_level(command + " ", parser, Archiver):
continue
with open('docs/usage/%s.rst.inc' % command.replace(" ", "_"), 'w') as doc:
doc.write(".. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit!\n\n")
if command == 'help':
for topic in Archiver.helptext:
params = {"topic": topic,
"underline": '~' * len('borg help ' + topic)}
doc.write(".. _borg_{topic}:\n\n".format(**params))
doc.write("borg help {topic}\n{underline}\n\n".format(**params))
doc.write(Archiver.helptext[topic])
else:
params = {"command": command,
"command_": command.replace(' ', '_'),
"underline": '-' * len('borg ' + command)}
doc.write(".. _borg_{command_}:\n\n".format(**params))
doc.write("borg {command}\n{underline}\n.. code-block:: none\n\n borg [common options] {command}".format(**params))
self.write_usage(parser, doc)
epilog = parser.epilog
parser.epilog = None
self.write_options(parser, doc)
doc.write("\n\nDescription\n~~~~~~~~~~~\n")
doc.write(epilog)
if 'create' in choices:
common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
with open('docs/usage/common-options.rst.inc', 'w') as doc:
self.write_options_group(common_options, doc, False)
return is_subcommand
def write_usage(self, parser, fp):
if any(len(o.option_strings) for o in parser._actions):
fp.write(' [options]')
for option in parser._actions:
if option.option_strings:
continue
fp.write(' ' + option.metavar)
def write_options(self, parser, fp):
for group in parser._action_groups:
if group.title == 'Common options':
fp.write('\n\n:ref:`common_options`\n')
fp.write(' |')
else:
self.write_options_group(group, fp)
def write_options_group(self, group, fp, with_title=True):
def is_positional_group(group):
return any(not o.option_strings for o in group._group_actions)
def get_help(option):
text = textwrap.dedent((option.help or '') % option.__dict__)
return '\n'.join('| ' + line for line in text.splitlines())
def shipout(text):
fp.write(textwrap.indent('\n'.join(text), ' ' * 4))
if not group._group_actions:
return
if with_title:
fp.write('\n\n')
fp.write(group.title + '\n')
text = []
if is_positional_group(group):
for option in group._group_actions:
text.append(option.metavar)
text.append(textwrap.indent(option.help or '', ' ' * 4))
shipout(text)
return
options = []
for option in group._group_actions:
if option.metavar:
option_fmt = '``%%s %s``' % option.metavar
else:
option_fmt = '``%s``'
option_str = ', '.join(option_fmt % s for s in option.option_strings)
options.append((option_str, option))
for option_str, option in options:
help = textwrap.indent(get_help(option), ' ' * 4)
text.append(option_str)
text.append(help)
shipout(text)
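# A small, self-contained sketch (not Borg code) of the argparse traversal that
# generate_level() relies on: registered subcommands are exposed through the
# _SubParsersAction's ``choices`` mapping, which is exactly what the loops above
# and below iterate over.
def _subparser_walk_sketch():
    import argparse
    parser = argparse.ArgumentParser(prog='demo')
    sub = parser.add_subparsers()
    sub.add_parser('init')
    sub.add_parser('create')
    commands = {}
    for action in parser._actions:
        if isinstance(action, argparse._SubParsersAction):
            commands.update(action.choices)
    return sorted(commands)  # -> ['create', 'init']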
class build_man(Command):
description = 'build man pages'
user_options = []
see_also = {
'create': ('delete', 'prune', 'check', 'patterns', 'placeholders', 'compression'),
'recreate': ('patterns', 'placeholders', 'compression'),
'list': ('info', 'diff', 'prune', 'patterns'),
'info': ('list', 'diff'),
'init': ('create', 'delete', 'check', 'list', 'key-import', 'key-export', 'key-change-passphrase'),
'key-import': ('key-export', ),
'key-export': ('key-import', ),
'mount': ('umount', 'extract'), # Would be cooler if these two were on the same page
'umount': ('mount', ),
'extract': ('mount', ),
}
rst_prelude = textwrap.dedent("""
.. role:: ref(title)
.. |project_name| replace:: Borg
""")
usage_group = {
'break-lock': 'lock',
'with-lock': 'lock',
'change-passphrase': 'key',
'key_change-passphrase': 'key',
'key_export': 'key',
'key_import': 'key',
'key_migrate-to-repokey': 'key',
'export-tar': 'tar',
'benchmark_crud': 'benchmark',
'umount': 'mount',
}
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print('building man pages (in docs/man)', file=sys.stderr)
import borg
borg.doc_mode = 'build_man'
os.makedirs('docs/man', exist_ok=True)
# allows us to build docs without the C modules fully loaded during help generation
from borg.archiver import Archiver
parser = Archiver(prog='borg').build_parser()
self.generate_level('', parser, Archiver)
self.build_topic_pages(Archiver)
self.build_intro_page()
def generate_level(self, prefix, parser, Archiver):
is_subcommand = False
choices = {}
for action in parser._actions:
if action.choices is not None and 'SubParsersAction' in str(action.__class__):
is_subcommand = True
for cmd, parser in action.choices.items():
choices[prefix + cmd] = parser
if prefix and not choices:
return
for command, parser in sorted(choices.items()):
if command.startswith('debug') or command == 'help':
continue
man_title = 'borg-' + command.replace(' ', '-')
print('building man page', man_title + '(1)', file=sys.stderr)
is_intermediary = self.generate_level(command + ' ', parser, Archiver)
doc, write = self.new_doc()
self.write_man_header(write, man_title, parser.description)
self.write_heading(write, 'SYNOPSIS')
if is_intermediary:
subparsers = [action for action in parser._actions if 'SubParsersAction' in str(action.__class__)][0]
for subcommand in subparsers.choices:
write('| borg', '[common options]', command, subcommand, '...')
self.see_also.setdefault(command, []).append('%s-%s' % (command, subcommand))
else:
write('borg', '[common options]', command, end='')
self.write_usage(write, parser)
write('\n')
description, _, notes = parser.epilog.partition('\n.. man NOTES')
if description:
self.write_heading(write, 'DESCRIPTION')
write(description)
if not is_intermediary:
self.write_heading(write, 'OPTIONS')
write('See `borg-common(1)` for common options of Borg commands.')
write()
self.write_options(write, parser)
self.write_examples(write, command)
if notes:
self.write_heading(write, 'NOTES')
write(notes)
self.write_see_also(write, man_title)
self.gen_man_page(man_title, doc.getvalue())
# Generate the borg-common(1) man page with the common options.
if 'create' in choices:
doc, write = self.new_doc()
man_title = 'borg-common'
self.write_man_header(write, man_title, 'Common options of Borg commands')
common_options = [group for group in choices['create']._action_groups if group.title == 'Common options'][0]
self.write_heading(write, 'SYNOPSIS')
self.write_options_group(write, common_options)
self.write_see_also(write, man_title)
self.gen_man_page(man_title, doc.getvalue())
return is_subcommand
def build_topic_pages(self, Archiver):
for topic, text in Archiver.helptext.items():
doc, write = self.new_doc()
man_title = 'borg-' + topic
print('building man page', man_title + '(1)', file=sys.stderr)
self.write_man_header(write, man_title, 'Details regarding ' + topic)
self.write_heading(write, 'DESCRIPTION')
write(text)
self.gen_man_page(man_title, doc.getvalue())
def build_intro_page(self):
print('building man page borg(1)', file=sys.stderr)
with open('docs/man_intro.rst') as fd:
man_intro = fd.read()
self.gen_man_page('borg', self.rst_prelude + man_intro)
def new_doc(self):
doc = io.StringIO(self.rst_prelude)
doc.read()
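# read() leaves the StringIO position at the end, so subsequent writes append
# after the rst prelude instead of overwriting it.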
write = self.printer(doc)
return doc, write
def printer(self, fd):
def write(*args, **kwargs):
print(*args, file=fd, **kwargs)
return write
def write_heading(self, write, header, char='-', double_sided=False):
write()
if double_sided:
write(char * len(header))
write(header)
write(char * len(header))
write()
def write_man_header(self, write, title, description):
self.write_heading(write, title, '=', double_sided=True)
self.write_heading(write, description, double_sided=True)
# man page metadata
write(':Author: The Borg Collective')
write(':Date:', datetime.utcnow().date().isoformat())
write(':Manual section: 1')
write(':Manual group: borg backup tool')
write()
def write_examples(self, write, command):
command = command.replace(' ', '_')
with open('docs/usage/%s.rst' % self.usage_group.get(command, command)) as fd:
usage = fd.read()
usage_include = '.. include:: %s.rst.inc' % command
begin = usage.find(usage_include)
end = usage.find('.. include', begin + 1)
# If a command has a dedicated anchor, it will occur before the command's include.
if 0 < usage.find('.. _', begin + 1) < end:
end = usage.find('.. _', begin + 1)
examples = usage[begin:end]
examples = examples.replace(usage_include, '')
examples = examples.replace('Examples\n~~~~~~~~', '')
examples = examples.replace('Miscellaneous Help\n------------------', '')
examples = re.sub('^(~+)$', lambda matches: '+' * len(matches.group(0)), examples, flags=re.MULTILINE)
examples = examples.strip()
if examples:
self.write_heading(write, 'EXAMPLES', '-')
write(examples)
def write_see_also(self, write, man_title):
see_also = self.see_also.get(man_title.replace('borg-', ''), ())
see_also = ['`borg-%s(1)`' % s for s in see_also]
see_also.insert(0, '`borg-common(1)`')
self.write_heading(write, 'SEE ALSO')
write(', '.join(see_also))
def gen_man_page(self, name, rst):
from docutils.writers import manpage
from docutils.core import publish_string
# We give the source_path so that docutils can find relative includes
# as if the document were located in the docs/ directory.
man_page = publish_string(source=rst, source_path='docs/virtmanpage.rst', writer=manpage.Writer())
with open('docs/man/%s.1' % name, 'wb') as fd:
fd.write(man_page)
def write_usage(self, write, parser):
if any(len(o.option_strings) for o in parser._actions):
write(' <options> ', end='')
for option in parser._actions:
if option.option_strings:
continue
write(option.metavar, end=' ')
def write_options(self, write, parser):
for group in parser._action_groups:
if group.title == 'Common options' or not group._group_actions:
continue
title = 'arguments' if group.title == 'positional arguments' else group.title
self.write_heading(write, title, '+')
self.write_options_group(write, group)
def write_options_group(self, write, group):
def is_positional_group(group):
return any(not o.option_strings for o in group._group_actions)
if is_positional_group(group):
for option in group._group_actions:
write(option.metavar)
write(textwrap.indent(option.help or '', ' ' * 4))
return
opts = OrderedDict()
for option in group._group_actions:
if option.metavar:
option_fmt = '%s ' + option.metavar
else:
option_fmt = '%s'
option_str = ', '.join(option_fmt % s for s in option.option_strings)
option_desc = textwrap.dedent((option.help or '') % option.__dict__)
opts[option_str] = textwrap.indent(option_desc, ' ' * 4)
padding = len(max(opts, key=len)) + 1
for option, desc in opts.items():
write(option.ljust(padding), desc)
def rm(file):
try:
os.unlink(file)
print('rm', file)
except FileNotFoundError:
pass
class Clean(clean):
def run(self):
super().run()
for source in cython_sources:
genc = source.replace('.pyx', '.c')
rm(genc)
compiled_glob = source.replace('.pyx', '.cpython*')
for compiled in sorted(glob(compiled_glob)):
rm(compiled)
cmdclass = {
'build_ext': build_ext,
'build_usage': build_usage,
'build_man': build_man,
'sdist': Sdist,
'clean': Clean,
}
ext_modules = []
if not on_rtd:
ext_modules += [
Extension('borg.compress', [compress_source], libraries=['lz4'], include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros),
Extension('borg.crypto.low_level', [crypto_ll_source], libraries=crypto_libraries, include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros),
Extension('borg.hashindex', [hashindex_source]),
Extension('borg.item', [item_source]),
Extension('borg.chunker', [chunker_source]),
Extension('borg.algorithms.checksums', [checksums_source]),
]
if not sys.platform.startswith(('win32', )):
ext_modules.append(Extension('borg.platform.posix', [platform_posix_source]))
if sys.platform == 'linux':
ext_modules.append(Extension('borg.platform.linux', [platform_linux_source], libraries=['acl']))
elif sys.platform.startswith('freebsd'):
ext_modules.append(Extension('borg.platform.freebsd', [platform_freebsd_source]))
elif sys.platform == 'darwin':
ext_modules.append(Extension('borg.platform.darwin', [platform_darwin_source]))
setup(
name='borgbackup',
use_scm_version={
'write_to': 'src/borg/_version.py',
},
author='The Borg Collective (see AUTHORS file)',
author_email='[email protected]',
url='https://borgbackup.readthedocs.io/',
description='Deduplicated, encrypted, authenticated and compressed backups',
long_description=long_description,
license='BSD',
platforms=['Linux', 'MacOS X', 'FreeBSD', 'OpenBSD', 'NetBSD', ],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Security :: Cryptography',
'Topic :: System :: Archiving :: Backup',
],
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'borg = borg.archiver:main',
'borgfs = borg.archiver:main',
]
},
package_data={
'borg': ['paperkey.html']
},
cmdclass=cmdclass,
ext_modules=ext_modules,
setup_requires=['setuptools_scm>=1.7'],
install_requires=install_requires,
extras_require=extras_require,
)
print("Borg requires Python %d.%d or later" % min_python)
|
|
#!/usr/bin/env python
copyright = '''
/*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
'''
GENERATE, UBYTE, USHORT, UINT = 'generate', 'ubyte', 'ushort', 'uint'
FIRST, LAST = 'first', 'last'
INTYPES = (GENERATE, UBYTE, USHORT, UINT)
OUTTYPES = (USHORT, UINT)
PVS=(FIRST, LAST)
PRIMS=('points',
'lines',
'linestrip',
'lineloop',
'tris',
'trifan',
'tristrip',
'quads',
'quadstrip',
'polygon')
LONGPRIMS=('PIPE_PRIM_POINTS',
'PIPE_PRIM_LINES',
'PIPE_PRIM_LINE_STRIP',
'PIPE_PRIM_LINE_LOOP',
'PIPE_PRIM_TRIANGLES',
'PIPE_PRIM_TRIANGLE_FAN',
'PIPE_PRIM_TRIANGLE_STRIP',
'PIPE_PRIM_QUADS',
'PIPE_PRIM_QUAD_STRIP',
'PIPE_PRIM_POLYGON')
longprim = dict(zip(PRIMS, LONGPRIMS))
intype_idx = dict(ubyte='IN_UBYTE', ushort='IN_USHORT', uint='IN_UINT')
outtype_idx = dict(ushort='OUT_USHORT', uint='OUT_UINT')
pv_idx = dict(first='PV_FIRST', last='PV_LAST')
def prolog():
print '''/* File automatically generated by indices.py */'''
print copyright
print r'''
/**
* @file
* Functions to translate and generate index lists
*/
#include "indices/u_indices.h"
#include "indices/u_indices_priv.h"
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
static unsigned out_size_idx( unsigned index_size )
{
switch (index_size) {
case 4: return OUT_UINT;
case 2: return OUT_USHORT;
default: assert(0); return OUT_USHORT;
}
}
static unsigned in_size_idx( unsigned index_size )
{
switch (index_size) {
case 4: return IN_UINT;
case 2: return IN_USHORT;
case 1: return IN_UBYTE;
default: assert(0); return IN_UBYTE;
}
}
static u_translate_func translate[IN_COUNT][OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
static u_generate_func generate[OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
'''
def vert( intype, outtype, v0 ):
if intype == GENERATE:
return '(' + outtype + ')(' + v0 + ')'
else:
return '(' + outtype + ')in[' + v0 + ']'
def point( intype, outtype, ptr, v0 ):
print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
def line( intype, outtype, ptr, v0, v1 ):
print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
def tri( intype, outtype, ptr, v0, v1, v2 ):
print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
print ' (' + ptr + ')[2] = ' + vert( intype, outtype, v2 ) + ';'
def do_point( intype, outtype, ptr, v0 ):
point( intype, outtype, ptr, v0 )
def do_line( intype, outtype, ptr, v0, v1, inpv, outpv ):
if inpv == outpv:
line( intype, outtype, ptr, v0, v1 )
else:
line( intype, outtype, ptr, v1, v0 )
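# When the input and output provoking-vertex conventions differ, the vertex
# order is rotated (or swapped, for lines) so that the original provoking
# vertex lands in the slot the output convention expects.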
def do_tri( intype, outtype, ptr, v0, v1, v2, inpv, outpv ):
if inpv == outpv:
tri( intype, outtype, ptr, v0, v1, v2 )
else:
if inpv == FIRST:
tri( intype, outtype, ptr, v1, v2, v0 )
else:
tri( intype, outtype, ptr, v2, v0, v1 )
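# A quad (v0, v1, v2, v3) is emitted as the two triangles (v0, v1, v3) and
# (v1, v2, v3), which is why quads()/quadstrip() below advance the output
# index by 6 per primitive.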
def do_quad( intype, outtype, ptr, v0, v1, v2, v3, inpv, outpv ):
do_tri( intype, outtype, ptr+'+0', v0, v1, v3, inpv, outpv );
do_tri( intype, outtype, ptr+'+3', v1, v2, v3, inpv, outpv );
def name(intype, outtype, inpv, outpv, prim):
if intype == GENERATE:
return 'generate_' + prim + '_' + outtype + '_' + inpv + '2' + outpv
else:
return 'translate_' + prim + '_' + intype + '2' + outtype + '_' + inpv + '2' + outpv
def preamble(intype, outtype, inpv, outpv, prim):
print 'static void ' + name( intype, outtype, inpv, outpv, prim ) + '('
if intype != GENERATE:
print ' const void * _in,'
print ' unsigned start,'
print ' unsigned nr,'
print ' void *_out )'
print '{'
if intype != GENERATE:
print ' const ' + intype + '*in = (const ' + intype + '*)_in;'
print ' ' + outtype + ' *out = (' + outtype + '*)_out;'
print ' unsigned i, j;'
print ' (void)j;'
def postamble():
print '}'
def points(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='points')
print ' for (i = start; i < (nr+start); i++) { '
do_point( intype, outtype, 'out+i', 'i' );
print ' }'
postamble()
def lines(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='lines')
print ' for (i = start; i < (nr+start); i+=2) { '
do_line( intype, outtype, 'out+i', 'i', 'i+1', inpv, outpv );
print ' }'
postamble()
def linestrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='linestrip')
print ' for (i = start, j = 0; j < nr; j+=2, i++) { '
do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
print ' }'
postamble()
def lineloop(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='lineloop')
print ' for (i = start, j = 0; j < nr - 2; j+=2, i++) { '
do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
print ' }'
do_line( intype, outtype, 'out+j', 'i', '0', inpv, outpv );
postamble()
def tris(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='tris')
print ' for (i = start; i < (nr+start); i+=3) { '
do_tri( intype, outtype, 'out+i', 'i', 'i+1', 'i+2', inpv, outpv );
print ' }'
postamble()
def tristrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='tristrip')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
if inpv == FIRST:
do_tri( intype, outtype, 'out+j', 'i', 'i+1+(i&1)', 'i+2-(i&1)', inpv, outpv );
else:
do_tri( intype, outtype, 'out+j', 'i+(i&1)', 'i+1-(i&1)', 'i+2', inpv, outpv );
print ' }'
postamble()
def trifan(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='trifan')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
print ' }'
postamble()
def polygon(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='polygon')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
if inpv == FIRST:
do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
else:
do_tri( intype, outtype, 'out+j', 'i+1', 'i+2', '0', inpv, outpv );
print ' }'
postamble()
def quads(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='quads')
print ' for (i = start, j = 0; j < nr; j+=6, i+=4) { '
do_quad( intype, outtype, 'out+j', 'i+0', 'i+1', 'i+2', 'i+3', inpv, outpv );
print ' }'
postamble()
def quadstrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='quadstrip')
print ' for (i = start, j = 0; j < nr; j+=6, i+=2) { '
do_quad( intype, outtype, 'out+j', 'i+2', 'i+0', 'i+1', 'i+3', inpv, outpv );
print ' }'
postamble()
def emit_funcs():
for intype in INTYPES:
for outtype in OUTTYPES:
for inpv in (FIRST, LAST):
for outpv in (FIRST, LAST):
points(intype, outtype, inpv, outpv)
lines(intype, outtype, inpv, outpv)
linestrip(intype, outtype, inpv, outpv)
lineloop(intype, outtype, inpv, outpv)
tris(intype, outtype, inpv, outpv)
tristrip(intype, outtype, inpv, outpv)
trifan(intype, outtype, inpv, outpv)
quads(intype, outtype, inpv, outpv)
quadstrip(intype, outtype, inpv, outpv)
polygon(intype, outtype, inpv, outpv)
def init(intype, outtype, inpv, outpv, prim):
if intype == GENERATE:
print ('generate[' +
outtype_idx[outtype] +
'][' + pv_idx[inpv] +
'][' + pv_idx[outpv] +
'][' + longprim[prim] +
'] = ' + name( intype, outtype, inpv, outpv, prim ) + ';')
else:
print ('translate[' +
intype_idx[intype] +
'][' + outtype_idx[outtype] +
'][' + pv_idx[inpv] +
'][' + pv_idx[outpv] +
'][' + longprim[prim] +
'] = ' + name( intype, outtype, inpv, outpv, prim ) + ';')
def emit_all_inits():
for intype in INTYPES:
for outtype in OUTTYPES:
for inpv in PVS:
for outpv in PVS:
for prim in PRIMS:
init(intype, outtype, inpv, outpv, prim)
def emit_init():
print 'void u_index_init( void )'
print '{'
print ' static int firsttime = 1;'
print ' if (!firsttime) return;'
print ' firsttime = 0;'
emit_all_inits()
print '}'
def epilog():
print '#include "indices/u_indices.c"'
def main():
prolog()
emit_funcs()
emit_init()
epilog()
if __name__ == '__main__':
main()
|
|
"""
This demo shows how to draw a dynamic mpl (matplotlib)
plot in a wxPython application.
It allows "live" plotting as well as manual zooming to specific
regions.
Both X and Y axes allow "auto" or "manual" settings. For Y, auto
mode sets the scaling of the graph to see all the data points.
For X, auto mode makes the graph "follow" the data. Set X min
to manual 0 to always see the whole data set from the beginning.
Note: press Enter in the 'manual' text box to make a new value
affect the plot.
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 31.07.2008
"""
import os
import pprint
import random
import sys
import wx
import pickle
from tests import DataGen
import time
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
#matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import numpy as np
import pylab
exited = False
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows switching between an automatic mode and a
manual mode with an associated value.
"""
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1,
label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1,
label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(35,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
def is_auto(self):
return self.radio_auto.GetValue()
def manual_value(self):
return self.value
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: dynamic matplotlib graph'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.datagen = DataGen()
self.datav = [self.datagen.next()[0]]
self.datac = [self.datagen.next()[1]]
self.paused = False
self.ocv = True
global exited
exited = False
self.create_menu()
self.create_status_bar()
self.create_main_panel()
#self.redraw_timer = wx.Timer(self)
#self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
#self.redraw_timer.Start(100)
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.xmin_control = BoundControlBox(self.panel, -1, "X min", 0)
self.xmax_control = BoundControlBox(self.panel, -1, "X max", 50)
self.ymin_control = BoundControlBox(self.panel, -1, "Y min", 0)
self.ymax_control = BoundControlBox(self.panel, -1, "Y max", 100)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.ocv_button = wx.Button(self.panel, -1, "Start")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_BUTTON, self.on_ocv_button, self.ocv_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_ocv_button, self.ocv_button)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1,
"Show X labels",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_xlab, self.cb_xlab)
self.cb_xlab.SetValue(True)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.ocv_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox2.AddSpacer(24)
self.hbox2.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.ymax_control, border=5, flag=wx.ALL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def init_plot(self):
self.dpi = 100
self.fig = Figure((3.0, 5.0), dpi=self.dpi)
self.axesv = self.fig.add_subplot(211)
self.axesc = self.fig.add_subplot(212)
self.axesv.set_axis_bgcolor('black')
self.axesv.set_title('Voltage (V)', size=12)
self.axesc.set_axis_bgcolor('black')
self.axesc.set_title('Current (mA)', size=12)
pylab.setp(self.axesv.get_xticklabels(), fontsize=8)
pylab.setp(self.axesv.get_yticklabels(), fontsize=8)
pylab.setp(self.axesc.get_xticklabels(), fontsize=8)
pylab.setp(self.axesc.get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
self.plot_datav = self.axesv.plot(
self.datav,
linewidth=1,
color=(1, 1, 0),
)[0]
self.plot_datac = self.axesc.plot(
self.datac,
linewidth=1,
color=(0, 0, 1),
)[0]
def draw_plot(self):
""" Redraws the plot
"""
# when xmin is on auto, it "follows" xmax to produce a
# sliding window effect. therefore, xmin is assigned after
# xmax.
#
if self.xmax_control.is_auto():
xmaxv = xmaxc = len(self.datav) if len(self.datav) > 50 else 50
else:
xmaxv = float(self.xmax_control.manual_value())
xmaxc = float(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xminv = xminc = xmaxv - 50
else:
xminv = float(self.xmin_control.manual_value())
xminc = float(self.xmin_control.manual_value())
# for ymin and ymax, find the minimal and maximal values
# in the data set and add a minimal margin.
#
# note that it's easy to change this scheme to the
# minimal/maximal value in the current display, and not
# the whole data set.
#
if self.ymin_control.is_auto():
yminv = min(self.datav[int(xminv):int(xmaxv)])
yminc = min(self.datac[int(xminc):int(xmaxc)])
else:
yminv = float(self.ymin_control.manual_value())
yminc = float(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
ymaxv = max(self.datav[int(xminv):int(xmaxv)])
ymaxc = max(self.datac[int(xminc):int(xmaxc)])
else:
ymaxv = float(self.ymax_control.manual_value())
ymaxc = float(self.ymax_control.manual_value())
self.axesv.set_xbound(lower=xminv, upper=xmaxv)
self.axesv.set_ybound(lower=yminv, upper=ymaxv)
self.axesc.set_xbound(lower=xminc, upper=xmaxc)
self.axesc.set_ybound(lower=yminc, upper=ymaxc)
# anecdote: axes.grid assumes b=True if any other flag is
# given even if b is set to False.
# so just passing the flag into the first statement won't
# work.
#
if self.cb_grid.IsChecked():
self.axesv.grid(True, color='gray')
self.axesc.grid(True, color='gray')
else:
self.axesv.grid(False)
self.axesc.grid(False)
# Using setp here is convenient, because get_xticklabels
# returns a list over which one needs to explicitly
# iterate, and setp already handles this.
#
pylab.setp(self.axesv.get_xticklabels(),
visible=self.cb_xlab.IsChecked())
pylab.setp(self.axesc.get_xticklabels(),
visible=self.cb_xlab.IsChecked())
self.plot_datav.set_xdata(np.arange(len(self.datav)))
self.plot_datav.set_ydata(np.array(self.datav))
self.plot_datac.set_xdata(np.arange(len(self.datac)))
self.plot_datac.set_ydata(np.array(self.datac))
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_ocv_button(self, event):
self.ocv = not self.ocv
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_update_ocv_button(self, event):
label = "Start" if self.ocv else "OCV"
self.ocv_button.SetLabel(label)
def on_cb_grid(self, event):
self.draw_plot()
def on_cb_xlab(self, event):
self.draw_plot()
def on_save_plot(self, event):
"""
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
"""
#file_choices = "PICKLE (*.pickle)|*.pickle"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot",
#wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath() + "current.pickle"
outfile = open(path,"w")
pickle.dump(self.datac, outfile)
outfile.close()
path = dlg.GetPath() + "voltage.pickle"
outfile = open(path,"w")
pickle.dump(self.datav, outfile)
outfile.close()
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
path = dlg.GetPath()
path = path + "log.txt"
outfile = open(path, "w")
i = 0
for voltage, current in zip(self.datav, self.datac):
outfile.write("Sample: " + str(i) + ", Voltage: " + str(voltage) + ", Current(mA): " + str(current) + "\n")
i += 1
outfile.close()
def on_load_plot(self):
self.paused = True
file_choices = "PICKLE (*.pickle)|*.pickle"
dlg = wx.FileDialog(
self,
message="Load plot...",
defaultDir=os.getcwd(),
#wildcard=file_choices,
style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath() + "current.pickle"
infile = open(path,"r")
self.datac = pickle.load(infile)
infile.close()
path = dlg.GetPath() + "voltage.pickle"
infile = open(path,"r")
self.datav = pickle.load(infile)
infile.close()
while True:
app.frame.on_redraw_timer(False)
time.sleep(.1)
def on_redraw_timer(self, pause):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
if not self.paused and not pause:
self.datav.append(self.datagen.next()[0])
self.datac.append(self.datagen.next()[1])
self.draw_plot()
def on_exit(self, event):
global exited
exited = True
self.datagen.erase()
self.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
app = wx.PySimpleApp()
def showRealTime():
app.frame = GraphFrame()
app.frame.Show()
app.MainLoop()
def ocv_on():
return app.frame.ocv
def TickTock(pause):
if exited:
return False
else:
app.frame.on_redraw_timer(pause)
return True
def loadPlot():
app.frame.on_load_plot()
def paused():
return app.frame.paused
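# A standalone sketch (hypothetical helper, not used by the demo above) of the
# sliding-window bound selection performed in GraphFrame.draw_plot() when both
# X bounds are on "auto": the window covers the last `window` samples and xmin
# trails xmax.
def _auto_x_bounds_sketch(n_samples, window=50):
    xmax = n_samples if n_samples > window else window
    xmin = xmax - window
    return xmin, xmax
# _auto_x_bounds_sketch(30)  -> (0, 50)
# _auto_x_bounds_sketch(120) -> (70, 120)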
|
|
import pickle
import pytest
from pyrsistent import pdeque, dq
def test_basic_right_and_left():
x = pdeque([1, 2])
assert x.right == 2
assert x.left == 1
assert len(x) == 2
def test_construction_with_maxlen():
assert pdeque([1, 2, 3, 4], maxlen=2) == pdeque([3, 4])
assert pdeque([1, 2, 3, 4], maxlen=4) == pdeque([1, 2, 3, 4])
assert pdeque([], maxlen=2) == pdeque()
def test_construction_with_invalid_maxlen():
with pytest.raises(TypeError):
pdeque([], maxlen='foo')
with pytest.raises(ValueError):
pdeque([], maxlen=-3)
def test_pop():
x = pdeque([1, 2, 3, 4]).pop()
assert x.right == 3
assert x.left == 1
x = x.pop()
assert x.right == 2
assert x.left == 1
x = x.pop()
assert x.right == 1
assert x.left == 1
x = x.pop()
assert x == pdeque()
x = pdeque([1, 2]).pop()
assert x == pdeque([1])
x = x.pop()
assert x == pdeque()
assert pdeque().append(1).pop() == pdeque()
assert pdeque().appendleft(1).pop() == pdeque()
def test_pop_multiple():
assert pdeque([1, 2, 3, 4]).pop(3) == pdeque([1])
assert pdeque([1, 2]).pop(3) == pdeque()
def test_pop_with_negative_index():
assert pdeque([1, 2, 3]).pop(-1) == pdeque([1, 2, 3]).popleft(1)
assert pdeque([1, 2, 3]).popleft(-1) == pdeque([1, 2, 3]).pop(1)
def test_popleft():
x = pdeque([1, 2, 3, 4]).popleft()
assert x.left == 2
assert x.right == 4
x = x.popleft()
assert x.left == 3
assert x.right == 4
x = x.popleft()
assert x.right == 4
assert x.left == 4
x = x.popleft()
assert x == pdeque()
x = pdeque([1, 2]).popleft()
assert x == pdeque([2])
x = x.popleft()
assert x == pdeque()
assert pdeque().append(1).popleft() == pdeque()
assert pdeque().appendleft(1).popleft() == pdeque()
def test_popleft_multiple():
assert pdeque([1, 2, 3, 4]).popleft(3) == pdeque([4])
def test_left_on_empty_deque():
with pytest.raises(IndexError):
pdeque().left
def test_right_on_empty_deque():
with pytest.raises(IndexError):
pdeque().right
def test_pop_empty_deque_returns_empty_deque():
# The other option is to raise an IndexError; returning an empty deque feels best for now though
assert pdeque().pop() == pdeque()
assert pdeque().popleft() == pdeque()
def test_str():
assert str(pdeque([1, 2, 3])) == 'pdeque([1, 2, 3])'
assert str(pdeque([])) == 'pdeque([])'
assert str(pdeque([1, 2], maxlen=4)) == 'pdeque([1, 2], maxlen=4)'
def test_append():
assert pdeque([1, 2]).append(3).append(4) == pdeque([1, 2, 3, 4])
def test_append_with_maxlen():
assert pdeque([1, 2], maxlen=2).append(3).append(4) == pdeque([3, 4])
assert pdeque([1, 2], maxlen=3).append(3).append(4) == pdeque([2, 3, 4])
assert pdeque([], maxlen=0).append(1) == pdeque()
def test_appendleft():
assert pdeque([2, 1]).appendleft(3).appendleft(4) == pdeque([4, 3, 2, 1])
def test_appendleft_with_maxlen():
assert pdeque([2, 1], maxlen=2).appendleft(3).appendleft(4) == pdeque([4, 3])
assert pdeque([2, 1], maxlen=3).appendleft(3).appendleft(4) == pdeque([4, 3, 2])
assert pdeque([], maxlen=0).appendleft(1) == pdeque()
def test_extend():
assert pdeque([1, 2]).extend([3, 4]) == pdeque([1, 2, 3, 4])
def test_extend_with_maxlen():
assert pdeque([1, 2], maxlen=3).extend([3, 4]) == pdeque([2, 3, 4])
assert pdeque([1, 2], maxlen=2).extend([3, 4]) == pdeque([3, 4])
assert pdeque([], maxlen=2).extend([1, 2]) == pdeque([1, 2])
assert pdeque([], maxlen=0).extend([1, 2]) == pdeque([])
def test_extendleft():
assert pdeque([2, 1]).extendleft([3, 4]) == pdeque([4, 3, 2, 1])
def test_extendleft_with_maxlen():
assert pdeque([1, 2], maxlen=3).extendleft([3, 4]) == pdeque([4, 3, 1])
assert pdeque([1, 2], maxlen=2).extendleft([3, 4]) == pdeque([4, 3])
assert pdeque([], maxlen=2).extendleft([1, 2]) == pdeque([2, 1])
assert pdeque([], maxlen=0).extendleft([1, 2]) == pdeque([])
def test_count():
x = pdeque([1, 2, 3, 2, 1])
assert x.count(1) == 2
assert x.count(2) == 2
def test_remove():
assert pdeque([1, 2, 3, 4]).remove(2) == pdeque([1, 3, 4])
assert pdeque([1, 2, 3, 4]).remove(4) == pdeque([1, 2, 3])
# Right list must be reversed before removing element
assert pdeque([1, 2, 3, 3, 4, 5, 4, 6]).remove(4) == pdeque([1, 2, 3, 3, 5, 4, 6])
def test_remove_element_missing():
with pytest.raises(ValueError):
pdeque().remove(2)
with pytest.raises(ValueError):
pdeque([1, 2, 3]).remove(4)
def test_reverse():
assert pdeque([1, 2, 3, 4]).reverse() == pdeque([4, 3, 2, 1])
assert pdeque().reverse() == pdeque()
def test_rotate_right():
assert pdeque([1, 2, 3, 4, 5]).rotate(2) == pdeque([4, 5, 1, 2, 3])
assert pdeque([1, 2]).rotate(0) == pdeque([1, 2])
assert pdeque().rotate(2) == pdeque()
def test_rotate_left():
assert pdeque([1, 2, 3, 4, 5]).rotate(-2) == pdeque([3, 4, 5, 1, 2])
assert pdeque().rotate(-2) == pdeque()
def test_set_maxlen():
x = pdeque([], maxlen=4)
assert x.maxlen == 4
with pytest.raises(AttributeError):
x.maxlen = 5
def test_comparison():
small = pdeque([1, 2])
large = pdeque([1, 2, 3])
assert small < large
assert large > small
assert not small > large
assert not large < small
assert large != small
# Not equal to other types
assert small != [1, 2]
def test_pickling():
input = pdeque([1, 2, 3], maxlen=5)
output = pickle.loads(pickle.dumps(input, -1))
assert output == input
assert output.maxlen == input.maxlen
def test_indexing():
assert pdeque([1, 2, 3])[0] == 1
assert pdeque([1, 2, 3])[1] == 2
assert pdeque([1, 2, 3])[2] == 3
assert pdeque([1, 2, 3])[-1] == 3
assert pdeque([1, 2, 3])[-2] == 2
assert pdeque([1, 2, 3])[-3] == 1
def test_one_element_indexing():
assert pdeque([2])[0] == 2
assert pdeque([2])[-1] == 2
def test_empty_indexing():
with pytest.raises(IndexError):
assert pdeque([])[0] == 1
def test_indexing_out_of_range():
with pytest.raises(IndexError):
pdeque([1, 2, 3])[-4]
with pytest.raises(IndexError):
pdeque([1, 2, 3])[3]
with pytest.raises(IndexError):
pdeque([2])[-2]
def test_indexing_invalid_type():
with pytest.raises(TypeError) as e:
pdeque([1, 2, 3])['foo']
assert 'cannot be interpreted' in str(e.value)
def test_slicing():
assert pdeque([1, 2, 3])[1:2] == pdeque([2])
assert pdeque([1, 2, 3])[2:1] == pdeque([])
assert pdeque([1, 2, 3])[-2:-1] == pdeque([2])
assert pdeque([1, 2, 3])[::2] == pdeque([1, 3])
def test_hashing():
assert hash(pdeque([1, 2, 3])) == hash(pdeque().append(1).append(2).append(3))
def test_index():
assert pdeque([1, 2, 3]).index(3) == 2
def test_literalish():
assert dq(1, 2, 3) == pdeque([1, 2, 3])
def test_supports_weakref():
import weakref
weakref.ref(dq(1, 2))
def test_iterable():
"""
PDeques can be created from iterables even though they can't be len()
hinted.
"""
assert pdeque(iter("a")) == pdeque(iter("a"))
|
|
"""Support for SimpliSafe alarm systems."""
import asyncio
from uuid import UUID
from simplipy import API
from simplipy.errors import EndpointUnavailable, InvalidCredentialsError, SimplipyError
import voluptuous as vol
from homeassistant.const import ATTR_CODE, CONF_CODE, CONF_TOKEN, CONF_USERNAME
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_ALARM_DURATION,
ATTR_ALARM_VOLUME,
ATTR_CHIME_VOLUME,
ATTR_ENTRY_DELAY_AWAY,
ATTR_ENTRY_DELAY_HOME,
ATTR_EXIT_DELAY_AWAY,
ATTR_EXIT_DELAY_HOME,
ATTR_LIGHT,
ATTR_VOICE_PROMPT_VOLUME,
DATA_CLIENT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
LOGGER,
VOLUMES,
)
DATA_LISTENER = "listener"
EVENT_SIMPLISAFE_NOTIFICATION = "SIMPLISAFE_NOTIFICATION"
DEFAULT_SOCKET_MIN_RETRY = 15
PLATFORMS = (
"alarm_control_panel",
"binary_sensor",
"lock",
"sensor",
)
ATTR_CATEGORY = "category"
ATTR_MESSAGE = "message"
ATTR_PIN_LABEL = "label"
ATTR_PIN_LABEL_OR_VALUE = "label_or_pin"
ATTR_PIN_VALUE = "pin"
ATTR_SYSTEM_ID = "system_id"
ATTR_TIMESTAMP = "timestamp"
SERVICE_BASE_SCHEMA = vol.Schema({vol.Required(ATTR_SYSTEM_ID): cv.positive_int})
SERVICE_REMOVE_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL_OR_VALUE): cv.string}
)
SERVICE_SET_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL): cv.string, vol.Required(ATTR_PIN_VALUE): cv.string}
)
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{
vol.Optional(ATTR_ALARM_DURATION): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(min=30, max=480),
),
vol.Optional(ATTR_ALARM_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_CHIME_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_ENTRY_DELAY_AWAY): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(min=30, max=255),
),
vol.Optional(ATTR_ENTRY_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.total_seconds(), vol.Range(max=255)
),
vol.Optional(ATTR_EXIT_DELAY_AWAY): vol.All(
cv.time_period,
lambda value: value.total_seconds(),
vol.Range(min=45, max=255),
),
vol.Optional(ATTR_EXIT_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.total_seconds(), vol.Range(max=255)
),
vol.Optional(ATTR_LIGHT): cv.boolean,
vol.Optional(ATTR_VOICE_PROMPT_VOLUME): vol.All(
vol.Coerce(int), vol.In(VOLUMES)
),
}
)
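# Illustrative payload accepted by the schema above (values are examples only
# and assume they fall within VOLUMES and the allowed ranges):
# {"system_id": 12345, "alarm_duration": 120, "entry_delay_away": 45, "light": True}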
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
@callback
def _async_save_refresh_token(hass, config_entry, token):
"""Save a refresh token to the config entry."""
hass.config_entries.async_update_entry(
config_entry, data={**config_entry.data, CONF_TOKEN: token}
)
async def async_get_client_id(hass):
"""Get a client ID (based on the HASS unique ID) for the SimpliSafe API.
Note that SimpliSafe requires full, "dashed" versions of UUIDs.
"""
hass_id = await hass.helpers.instance_id.async_get()
return str(UUID(hass_id))
async def async_register_base_station(hass, system, config_entry_id):
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry_id,
identifiers={(DOMAIN, system.serial)},
manufacturer="SimpliSafe",
model=system.version,
name=system.address,
)
async def async_setup(hass, config):
"""Set up the SimpliSafe component."""
hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass, config_entry): # noqa: C901
"""Set up SimpliSafe as config entry."""
hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id] = []
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_USERNAME]
if CONF_CODE in config_entry.data:
# If an alarm code was provided as part of configuration.yaml, pop it out of
# the config entry's data and move it to options:
data = {**config_entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**config_entry.options,
CONF_CODE: data.pop(CONF_CODE),
}
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
_verify_domain_control = verify_domain_control(hass, DOMAIN)
client_id = await async_get_client_id(hass)
websession = aiohttp_client.async_get_clientsession(hass)
try:
api = await API.login_via_token(
config_entry.data[CONF_TOKEN], client_id=client_id, session=websession
)
except InvalidCredentialsError:
LOGGER.error("Invalid credentials provided")
return False
except SimplipyError as err:
LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
_async_save_refresh_token(hass, config_entry, api.refresh_token)
simplisafe = SimpliSafe(hass, api, config_entry)
try:
await simplisafe.async_init()
except SimplipyError as err:
raise ConfigEntryNotReady from err
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = simplisafe
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
@callback
def verify_system_exists(coro):
"""Log an error if a service call uses an invalid system ID."""
async def decorator(call):
"""Decorate."""
system_id = int(call.data[ATTR_SYSTEM_ID])
if system_id not in simplisafe.systems:
LOGGER.error("Unknown system ID in service call: %s", system_id)
return
await coro(call)
return decorator
@callback
def v3_only(coro):
"""Log an error if the decorated coroutine is called with a v2 system."""
async def decorator(call):
"""Decorate."""
system = simplisafe.systems[int(call.data[ATTR_SYSTEM_ID])]
if system.version != 3:
LOGGER.error("Service only available on V3 systems")
return
await coro(call)
return decorator
@verify_system_exists
@_verify_domain_control
async def clear_notifications(call):
"""Clear all active notifications."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.clear_notifications()
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def remove_pin(call):
"""Remove a PIN."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.remove_pin(call.data[ATTR_PIN_LABEL_OR_VALUE])
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def set_pin(call):
"""Set a PIN."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_pin(call.data[ATTR_PIN_LABEL], call.data[ATTR_PIN_VALUE])
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@v3_only
@_verify_domain_control
async def set_system_properties(call):
"""Set one or more system parameters."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_properties(
{
prop: value
for prop, value in call.data.items()
if prop != ATTR_SYSTEM_ID
}
)
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
for service, method, schema in [
("clear_notifications", clear_notifications, None),
("remove_pin", remove_pin, SERVICE_REMOVE_PIN_SCHEMA),
("set_pin", set_pin, SERVICE_SET_PIN_SCHEMA),
(
"set_system_properties",
set_system_properties,
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA,
),
]:
async_register_admin_service(hass, DOMAIN, service, method, schema=schema)
hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id].append(
config_entry.add_update_listener(async_reload_entry)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a SimpliSafe config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][DATA_CLIENT].pop(entry.entry_id)
for remove_listener in hass.data[DOMAIN][DATA_LISTENER].pop(entry.entry_id):
remove_listener()
return unload_ok
async def async_reload_entry(hass, config_entry):
"""Handle an options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
class SimpliSafe:
"""Define a SimpliSafe data object."""
def __init__(self, hass, api, config_entry):
"""Initialize."""
self._api = api
self._emergency_refresh_token_used = False
self._hass = hass
self._system_notifications = {}
self.config_entry = config_entry
self.coordinator = None
self.systems = {}
@callback
def _async_process_new_notifications(self, system):
"""Act on any new system notifications."""
if self._hass.state != CoreState.running:
# If HASS isn't fully running yet, it may cause the SIMPLISAFE_NOTIFICATION
# event to fire before dependent components (like automation) are fully
# ready. If that's the case, skip:
return
latest_notifications = set(system.notifications)
to_add = latest_notifications.difference(
self._system_notifications[system.system_id]
)
if not to_add:
return
LOGGER.debug("New system notifications: %s", to_add)
self._system_notifications[system.system_id].update(to_add)
for notification in to_add:
text = notification.text
if notification.link:
text = f"{text} For more information: {notification.link}"
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_NOTIFICATION,
event_data={
ATTR_CATEGORY: notification.category,
ATTR_CODE: notification.code,
ATTR_MESSAGE: text,
ATTR_TIMESTAMP: notification.timestamp,
},
)
async def async_init(self):
"""Initialize the data class."""
self.systems = await self._api.get_systems()
for system in self.systems.values():
self._system_notifications[system.system_id] = set()
self._hass.async_create_task(
async_register_base_station(
self._hass, system, self.config_entry.entry_id
)
)
self.coordinator = DataUpdateCoordinator(
self._hass,
LOGGER,
name=self.config_entry.data[CONF_USERNAME],
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=self.async_update,
)
async def async_update(self):
"""Get updated data from SimpliSafe."""
async def async_update_system(system):
"""Update a system."""
await system.update(cached=system.version != 3)
self._async_process_new_notifications(system)
tasks = [async_update_system(system) for system in self.systems.values()]
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, InvalidCredentialsError):
if self._emergency_refresh_token_used:
raise ConfigEntryAuthFailed(
"Update failed with stored refresh token"
)
LOGGER.warning("SimpliSafe cloud error; trying stored refresh token")
self._emergency_refresh_token_used = True
try:
await self._api.refresh_access_token(
self.config_entry.data[CONF_TOKEN]
)
return
except SimplipyError as err:
raise UpdateFailed( # pylint: disable=raise-missing-from
f"Error while using stored refresh token: {err}"
)
if isinstance(result, EndpointUnavailable):
# In case the user attempts an action not allowed in their current plan,
# we merely log that message at INFO level (so the user is aware,
# but not spammed with ERROR messages about something they cannot change):
LOGGER.info(result)
if isinstance(result, SimplipyError):
raise UpdateFailed(f"SimpliSafe error while updating: {result}")
if self._api.refresh_token != self.config_entry.data[CONF_TOKEN]:
_async_save_refresh_token(
self._hass, self.config_entry, self._api.refresh_token
)
# If we've reached this point using an emergency refresh token, we're in the
# clear and we can discard it:
if self._emergency_refresh_token_used:
self._emergency_refresh_token_used = False
class SimpliSafeEntity(CoordinatorEntity):
"""Define a base SimpliSafe entity."""
def __init__(self, simplisafe, system, name, *, serial=None):
"""Initialize."""
super().__init__(simplisafe.coordinator)
self._name = name
self._online = True
self._simplisafe = simplisafe
self._system = system
if serial:
self._serial = serial
else:
self._serial = system.serial
self._attrs = {ATTR_SYSTEM_ID: system.system_id}
self._device_info = {
"identifiers": {(DOMAIN, system.system_id)},
"manufacturer": "SimpliSafe",
"model": system.version,
"name": name,
"via_device": (DOMAIN, system.serial),
}
@property
def available(self):
"""Return whether the entity is available."""
# We can easily detect if the V3 system is offline, but no simple check exists
# for the V2 system. Therefore, assuming the coordinator hasn't failed, we mark
# the entity as available if:
# 1. We can verify that the system is online (assuming True if we can't)
# 2. We can verify that the entity is online
return not (self._system.version == 3 and self._system.offline) and self._online
@property
def device_info(self):
"""Return device registry information for this entity."""
return self._device_info
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._attrs
@property
def name(self):
"""Return the name of the entity."""
return f"{self._system.address} {self._name}"
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._serial
@callback
def _handle_coordinator_update(self):
"""Update the entity with new REST API data."""
self.async_update_from_rest_api()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
self.async_update_from_rest_api()
@callback
def async_update_from_rest_api(self):
"""Update the entity with the provided REST API data."""
raise NotImplementedError()
class SimpliSafeBaseSensor(SimpliSafeEntity):
"""Define a SimpliSafe base (binary) sensor."""
def __init__(self, simplisafe, system, sensor):
"""Initialize."""
super().__init__(simplisafe, system, sensor.name, serial=sensor.serial)
self._device_info["identifiers"] = {(DOMAIN, sensor.serial)}
self._device_info["model"] = sensor.type.name
self._device_info["name"] = sensor.name
self._sensor = sensor
self._sensor_type_human_name = " ".join(
[w.title() for w in self._sensor.type.name.split("_")]
)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._system.address} {self._name} {self._sensor_type_human_name}"
|
|
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import neutron_fwaas.services.firewall.drivers.mcafee as mcafee
import neutron_fwaas.services.firewall.drivers.mcafee.ngfw_fwaas as fwaas
from neutron.tests import base
FAKE_FIREWALL_ID = 'firewall_id'
FAKE_POLICY_ID = 'policy_id'
FAKE_TENANT_ID = 'tenant_id'
FAKE_ROUTER_ID = 'router_id'
FAKE_FW_NAME = 'fw_name'
class NGFWFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(NGFWFwaasTestCase, self).setUp()
self.firewall = fwaas.NgfwFwaasDriver()
self.rule_list = self._fake_ipv4_rules()
self.apply_list = self._fake_apply_list()
self.post_return = mock.MagicMock()
self.tmp_ref = 'temp_ref'
self.post_return.headers = {'location': self.tmp_ref}
# we generate the policy name by formatting the ids of firewall,
# policy, router
self.policy_name = "%s_%s_%s" % (
FAKE_FIREWALL_ID[0:7], FAKE_POLICY_ID[0:7],
FAKE_ROUTER_ID[0:7])
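# With the fake ids above this evaluates to 'firewal_policy__router_'
# (the first seven characters of each id, joined by underscores).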
def _fake_ipv4_rules(self):
rule1 = {'action': 'deny',
'description': '',
'destination_ip_address': None,
'destination_port': '23',
'enabled': True,
'firewall_policy_id': FAKE_POLICY_ID,
'id': '1',
'ip_version': 4,
'name': 'a2',
'position': 1,
'protocol': 'udp',
'shared': False,
'source_ip_address': None,
'source_port': '23',
'tenant_id': FAKE_TENANT_ID}
rule2 = {'action': 'deny',
'description': '',
'destination_ip_address': None,
'destination_port': None,
'enabled': True,
'firewall_policy_id': FAKE_POLICY_ID,
'id': '2',
'ip_version': 4,
'name': 'a3',
'position': 2,
'protocol': 'icmp',
'shared': False,
'source_ip_address': '192.168.100.0/24',
'source_port': None,
'tenant_id': FAKE_TENANT_ID}
rule3 = {'action': 'allow',
'description': '',
'destination_ip_address': None,
'destination_port': None,
'enabled': True,
'firewall_policy_id': FAKE_POLICY_ID,
'id': '3',
'ip_version': 4,
'name': 'a4',
'position': 3,
'protocol': 'tcp',
'shared': False,
'source_ip_address': None,
'source_port': None,
'tenant_id': FAKE_TENANT_ID}
return [rule1, rule2, rule3]
def _fake_firewall(self, rule_list):
fw = {
'admin_state_up': True,
'description': '',
'firewall_policy_id': FAKE_POLICY_ID,
'id': FAKE_FIREWALL_ID,
'name': FAKE_FW_NAME,
'shared': None,
'status': 'PENDING_CREATE',
'tenant_id': FAKE_TENANT_ID,
'firewall_rule_list': rule_list}
return fw
def _fake_apply_list(self):
apply_list = []
router_info_inst = mock.Mock()
fake_interface = mock.Mock()
router_inst = (
{'_interfaces': fake_interface,
'admin_state_up': True,
'distributed': False,
'external_gateway_info': None,
'gw_port_id': None,
'ha': False,
'ha_vr_id': 0,
'id': FAKE_ROUTER_ID,
'name': 'rrr1',
'routes': [],
'status': 'ACTIVE',
'tenant_id': FAKE_TENANT_ID})
router_info_inst.router = router_inst
apply_list.append(router_info_inst)
return apply_list
def test_update_firewall(self):
firewall = self._fake_firewall(self.rule_list)
ref_v4rule = self.tmp_ref + "/fw_ipv4_access_rule"
ref_upload = self.tmp_ref + "/upload"
# NOTE(cbrandily): we replace jsonutils.dumps with the identity function in
# order to compare dictionaries instead of their json dumps and avoid
# assuming how jsonutils.dumps orders dictionaries.
with mock.patch('oslo_serialization.jsonutils.dumps',
side_effect=lambda x: x), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'login'), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'get'), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'logout'), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'post',
return_value=self.post_return) as post:
expected = [
mock.call(
'elements/fw_policy',
{"name": self.policy_name, "template": None}),
mock.call(
'elements/udp_service',
{"min_dst_port": 23, "max_dst_port": 23,
"name": "service-a2", "max_src_port": 23,
"min_src_port": 23}),
mock.call(
ref_v4rule,
{"action": {"action": "discard",
"connection_tracking_options": {}},
"services": {"service": [self.tmp_ref]},
"sources": {"src": ["None"]},
"name": "a2",
"destinations": {"dst": ["None"]}},
raw=True),
mock.call(
'elements/network',
{"ipv4_network": "192.168.100.0/24",
"name": "network-192.168.100.0/24"}),
mock.call(
'elements/icmp_service',
{"icmp_code": 0, "icmp_type": 0, "name": "service22"}),
mock.call(
ref_v4rule,
{"action": {"action": "discard",
"connection_tracking_options": {}},
"services": {"service": [self.tmp_ref]},
"sources": {"src": [self.tmp_ref]},
"name": "a3",
"destinations": {"dst": ["None"]}},
raw=True),
mock.call(
'elements/tcp_service',
{"min_dst_port": 0, "max_dst_port": 65535,
"name": "service-a4", "max_src_port": 65535,
"min_src_port": 0}),
mock.call(
ref_v4rule,
{"action": {"action": "allow",
"connection_tracking_options": {}},
"services": {"service": [self.tmp_ref]},
"sources": {"src": ["None"]},
"name": "a4",
"destinations": {"dst": ["None"]}},
raw=True),
mock.call(ref_upload, '', raw=True)]
self.firewall.update_firewall('legacy', self.apply_list, firewall)
self.assertEqual(expected, post.call_args_list)
def test_create_firewall(self):
self.test_update_firewall()
def test_delete_firewall(self):
firewall = self._fake_firewall(self.rule_list)
get_value = [{'result': [{'name': self.policy_name,
'href': self.tmp_ref}, ]}, ]
with mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'login'), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'get',
return_value=get_value),\
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'logout'), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'post',
return_value=self.post_return), \
mock.patch.object(mcafee.smc_api.SMCAPIConnection, 'delete'
) as delete:
self.firewall.delete_firewall('legacy', self.apply_list, firewall)
expected = [
mock.call(self.tmp_ref, raw=True),
mock.call(self.tmp_ref, raw=True)
]
self.assertEqual(expected, delete.call_args_list)
|
|
#!/usr/bin/env python2.7
# WARNING! This is a Python 2 script. Read README.rst for rationale.
"""
Usage: make_lite.py <wrapped_routines_file> <lapack_dir>
Typical invocation:
make_lite.py wrapped_routines /tmp/lapack-3.x.x
Requires the following to be on the path:
* f2c
* patch
"""
import sys
import os
import re
import subprocess
import shutil
import fortran
import clapack_scrub
try:
from distutils.spawn import find_executable as which # Python 2
except ImportError:
from shutil import which # Python 3
# Arguments to pass to f2c. You'll always want -A for ANSI C prototypes
# Others of interest: -a to not make variables static by default
# -C to check array subscripts
F2C_ARGS = ['-A', '-Nx800']
# The header to add to the top of the f2c_*.c file. Note that dlamch_() calls
# will be replaced by the macros below by clapack_scrub.scrub_source()
HEADER_BLURB = '''\
/*
* NOTE: This is generated code. Look in numpy/linalg/lapack_lite for
* information on remaking this file.
*/
'''
HEADER = HEADER_BLURB + '''\
#include "f2c.h"
#ifdef HAVE_CONFIG
#include "config.h"
#else
extern doublereal dlamch_(char *);
#define EPSILON dlamch_("Epsilon")
#define SAFEMINIMUM dlamch_("Safe minimum")
#define PRECISION dlamch_("Precision")
#define BASE dlamch_("Base")
#endif
extern doublereal dlapy2_(doublereal *x, doublereal *y);
/*
f2c knows the exact rules for precedence, and so omits parentheses where not
strictly necessary. Since this is generated code, we don't really care if
it's readable, and we know what is written is correct. So don't warn about
them.
*/
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wparentheses"
#endif
'''
class FortranRoutine:
"""Wrapper for a Fortran routine in a file.
"""
type = 'generic'
def __init__(self, name=None, filename=None):
self.filename = filename
if name is None:
root, ext = os.path.splitext(filename)
name = root
self.name = name
self._dependencies = None
def dependencies(self):
if self._dependencies is None:
deps = fortran.getDependencies(self.filename)
self._dependencies = [d.lower() for d in deps]
return self._dependencies
def __repr__(self):
return "FortranRoutine({!r}, filename={!r})".format(self.name,
self.filename)
class UnknownFortranRoutine(FortranRoutine):
"""Wrapper for a Fortran routine for which the corresponding file
is not known.
"""
type = 'unknown'
def __init__(self, name):
FortranRoutine.__init__(self, name=name, filename='<unknown>')
def dependencies(self):
return []
class FortranLibrary:
"""Container for a bunch of Fortran routines.
"""
def __init__(self, src_dirs):
self._src_dirs = src_dirs
self.names_to_routines = {}
def _findRoutine(self, rname):
rname = rname.lower()
for s in self._src_dirs:
ffilename = os.path.join(s, rname + '.f')
if os.path.exists(ffilename):
return self._newFortranRoutine(rname, ffilename)
return UnknownFortranRoutine(rname)
def _newFortranRoutine(self, rname, filename):
return FortranRoutine(rname, filename)
def addIgnorableRoutine(self, rname):
"""Add a routine that we don't want to consider when looking at
dependencies.
"""
rname = rname.lower()
routine = UnknownFortranRoutine(rname)
self.names_to_routines[rname] = routine
def addRoutine(self, rname):
"""Add a routine to the library.
"""
self.getRoutine(rname)
def getRoutine(self, rname):
"""Get a routine from the library. Will add if it's not found.
"""
unique = []
rname = rname.lower()
routine = self.names_to_routines.get(rname, unique)
if routine is unique:
routine = self._findRoutine(rname)
self.names_to_routines[rname] = routine
return routine
def allRoutineNames(self):
"""Return the names of all the routines.
"""
return list(self.names_to_routines.keys())
def allRoutines(self):
"""Return all the routines.
"""
return list(self.names_to_routines.values())
def resolveAllDependencies(self):
"""Try to add routines to the library to satisfy all the dependencies
for each routine in the library.
Returns a set of routine names that have the dependencies unresolved.
"""
done_this = set()
last_todo = set()
while True:
todo = set(self.allRoutineNames()) - done_this
if todo == last_todo:
break
for rn in todo:
r = self.getRoutine(rn)
deps = r.dependencies()
for d in deps:
self.addRoutine(d)
done_this.add(rn)
last_todo = todo
return todo
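# Minimal dependency-resolution sketch (illustrative only, never called by the
# build): the source directory and routine name below are hypothetical.
def _example_resolve_dependencies(src_dir='/tmp/lapack-3.x.x/SRC',
                                  routine='dgesv'):
    """Show how FortranLibrary pulls in a routine and everything it calls."""
    lib = FortranLibrary([src_dir])
    lib.addRoutine(routine)
    lib.resolveAllDependencies()
    # Routines whose .f files were not found in src_dir appear as
    # UnknownFortranRoutine entries with filename '<unknown>'.
    return lib.allRoutineNames()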
class LapackLibrary(FortranLibrary):
def _newFortranRoutine(self, rname, filename):
routine = FortranLibrary._newFortranRoutine(self, rname, filename)
if 'blas' in filename.lower():
routine.type = 'blas'
elif 'install' in filename.lower():
routine.type = 'config'
elif rname.startswith('z'):
routine.type = 'z_lapack'
elif rname.startswith('c'):
routine.type = 'c_lapack'
elif rname.startswith('s'):
routine.type = 's_lapack'
elif rname.startswith('d'):
routine.type = 'd_lapack'
else:
routine.type = 'lapack'
return routine
def allRoutinesByType(self, typename):
routines = sorted((r.name, r) for r in self.allRoutines() if r.type == typename)
return [a[1] for a in routines]
def printRoutineNames(desc, routines):
print(desc)
for r in routines:
print('\t%s' % r.name)
def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC')
if not os.path.exists(blas_src_dir):
blas_src_dir = os.path.join(lapack_dir, 'blas', 'src')
lapack_src_dir = os.path.join(lapack_dir, 'SRC')
if not os.path.exists(lapack_src_dir):
lapack_src_dir = os.path.join(lapack_dir, 'src')
install_src_dir = os.path.join(lapack_dir, 'INSTALL')
if not os.path.exists(install_src_dir):
install_src_dir = os.path.join(lapack_dir, 'install')
library = LapackLibrary([install_src_dir, blas_src_dir, lapack_src_dir])
for r in ignores:
library.addIgnorableRoutine(r)
for w in wrapped_routines:
library.addRoutine(w)
library.resolveAllDependencies()
return library
def getWrappedRoutineNames(wrapped_routines_file):
routines = []
ignores = []
with open(wrapped_routines_file) as fo:
for line in fo:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('IGNORE:'):
line = line[7:].strip()
ig = line.split()
ignores.extend(ig)
else:
routines.append(line)
return routines, ignores
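# For reference, the wrapped_routines file parsed above is plain text with one
# routine name per line; blank lines and '#' comments are skipped, and lines
# starting with 'IGNORE:' list routines to exclude from dependency resolution.
# Illustrative contents:
#
#   # routines wrapped by lapack_lite
#   dgesv
#   dgelsd
#   IGNORE: xerbla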
types = {'blas', 'lapack', 'd_lapack', 's_lapack', 'z_lapack', 'c_lapack', 'config'}
def dumpRoutineNames(library, output_dir):
for typename in {'unknown'} | types:
routines = library.allRoutinesByType(typename)
filename = os.path.join(output_dir, typename + '_routines.lst')
with open(filename, 'w') as fo:
for r in routines:
deps = r.dependencies()
fo.write('%s: %s\n' % (r.name, ' '.join(deps)))
def concatenateRoutines(routines, output_file):
with open(output_file, 'w') as output_fo:
for r in routines:
with open(r.filename, 'r') as fo:
source = fo.read()
output_fo.write(source)
class F2CError(Exception):
pass
def runF2C(fortran_filename, output_dir):
fortran_filename = fortran_filename.replace('\\', '/')
try:
subprocess.check_call(
["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename]
)
except subprocess.CalledProcessError:
raise F2CError
def scrubF2CSource(c_file):
with open(c_file) as fo:
source = fo.read()
source = clapack_scrub.scrubSource(source, verbose=True)
with open(c_file, 'w') as fo:
fo.write(HEADER)
fo.write(source)
def ensure_executable(name):
    # which()/find_executable() return None rather than raising when the
    # tool is missing, so test the return value explicitly
    if which(name) is None:
        raise SystemExit(name + ' not found')
def create_name_header(output_dir):
routine_re = re.compile(r'^ (subroutine|.* function)\s+(\w+)\(.*$',
re.I)
extern_re = re.compile(r'^extern [a-z]+ ([a-z0-9_]+)\(.*$')
# BLAS/LAPACK symbols
symbols = set(['xerbla'])
for fn in os.listdir(output_dir):
fn = os.path.join(output_dir, fn)
if not fn.endswith('.f'):
continue
with open(fn, 'r') as f:
for line in f:
m = routine_re.match(line)
if m:
symbols.add(m.group(2).lower())
# f2c symbols
f2c_symbols = set()
with open('f2c.h', 'r') as f:
for line in f:
m = extern_re.match(line)
if m:
f2c_symbols.add(m.group(1))
with open(os.path.join(output_dir, 'lapack_lite_names.h'), 'w') as f:
f.write(HEADER_BLURB)
f.write(
"/*\n"
" * This file renames all BLAS/LAPACK and f2c symbols to avoid\n"
" * dynamic symbol name conflicts, in cases where e.g.\n"
" * integer sizes do not match with 'standard' ABI.\n"
" */\n")
# Rename BLAS/LAPACK symbols
for name in sorted(symbols):
f.write("#define %s_ BLAS_FUNC(%s)\n" % (name, name))
# Rename also symbols that f2c exports itself
f.write("\n"
"/* Symbols exported by f2c.c */\n")
for name in sorted(f2c_symbols):
f.write("#define %s numpy_lapack_lite_%s\n" % (name, name))
def main():
if len(sys.argv) != 3:
print(__doc__)
return
# Make sure that patch and f2c are found on path
ensure_executable('f2c')
ensure_executable('patch')
wrapped_routines_file = sys.argv[1]
lapack_src_dir = sys.argv[2]
output_dir = os.path.join(os.path.dirname(__file__), 'build')
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir)
wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file)
library = getLapackRoutines(wrapped_routines, ignores, lapack_src_dir)
dumpRoutineNames(library, output_dir)
for typename in types:
fortran_file = os.path.join(output_dir, 'f2c_%s.f' % typename)
c_file = fortran_file[:-2] + '.c'
print('creating %s ...' % c_file)
routines = library.allRoutinesByType(typename)
concatenateRoutines(routines, fortran_file)
        # apply the patch
patch_file = os.path.basename(fortran_file) + '.patch'
if os.path.exists(patch_file):
subprocess.check_call(['patch', '-u', fortran_file, patch_file])
print("Patched {}".format(fortran_file))
try:
runF2C(fortran_file, output_dir)
except F2CError:
print('f2c failed on %s' % fortran_file)
break
scrubF2CSource(c_file)
# patch any changes needed to the C file
c_patch_file = c_file + '.patch'
if os.path.exists(c_patch_file):
subprocess.check_call(['patch', '-u', c_file, c_patch_file])
print()
create_name_header(output_dir)
for fname in os.listdir(output_dir):
if fname.endswith('.c') or fname == 'lapack_lite_names.h':
print('Copying ' + fname)
shutil.copy(
os.path.join(output_dir, fname),
os.path.abspath(os.path.dirname(__file__)),
)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''NeuroM neuron checking functions.
Contains functions for checking validity of neuron neurites and somata.
Tests assume neurites and/or soma have been successfully built where applicable,
i.e. soma- and neurite-related structural tests pass.
'''
import numpy as np
from neurom import NeuriteType
from neurom.core import Tree, iter_segments
from neurom.core.dataformat import COLS
from neurom.morphmath import section_length, segment_length
from neurom.check.morphtree import get_flat_neurites, get_nonmonotonic_neurites
from neurom.fst import _neuritefunc as _nf
from neurom.check import CheckResult
from neurom._compat import zip
def _read_neurite_type(neurite):
'''Simply read the stored neurite type'''
return neurite.type
def has_axon(neuron, treefun=_read_neurite_type):
'''Check if a neuron has an axon
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result
'''
return CheckResult(NeuriteType.axon in (treefun(n) for n in neuron.neurites))
def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
'''Check if a neuron has apical dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of apical dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
'''
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.apical_dendrite) >= min_number)
def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
'''Check if a neuron has basal dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of basal dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
'''
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.basal_dendrite) >= min_number)
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):
'''Check that a neuron has no flat neurites
Arguments:
neuron(Neuron): The neuron object to test
tol(float): tolerance
method(string): way of determining flatness, 'tolerance', 'ratio'
as described in :meth:`neurom.check.morphtree.get_flat_neurites`
Returns:
CheckResult with result
'''
return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)
def has_all_monotonic_neurites(neuron, tol=1e-6):
'''Check that a neuron has only neurites that are monotonic
Arguments:
neuron(Neuron): The neuron object to test
tol(float): tolerance
Returns:
CheckResult with result
'''
return CheckResult(len(get_nonmonotonic_neurites(neuron, tol)) == 0)
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
    '''Check that there are no neuron segments with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a segment length is considered to
be non-zero
Returns:
CheckResult with result including list of (section_id, segment_id)
of zero length segments
'''
bad_ids = []
for sec in _nf.iter_sections(neuron):
p = sec.points
for i, s in enumerate(zip(p[:-1], p[1:])):
if segment_length(s) <= threshold:
bad_ids.append((sec.id, i))
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_all_nonzero_section_lengths(neuron, threshold=0.0):
    '''Check that there are no neuron sections with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a section length is considered
to be non-zero
Returns:
        CheckResult with result including list of ids of bad sections
'''
bad_ids = [s.id for s in _nf.iter_sections(neuron.neurites)
if section_length(s.points) <= threshold]
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_all_nonzero_neurite_radii(neuron, threshold=0.0):
    '''Check that there are no neurite points with radius not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold: value above which a radius is considered to be non-zero
Returns:
CheckResult with result including list of (section ID, point ID) pairs
of zero-radius points
'''
bad_ids = []
seen_ids = set()
for s in _nf.iter_sections(neuron):
for i, p in enumerate(s.points):
info = (s.id, i)
if p[COLS.R] <= threshold and info not in seen_ids:
seen_ids.add(info)
bad_ids.append(info)
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_nonzero_soma_radius(neuron, threshold=0.0):
    '''Check that the soma radius is above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold: value above which the soma radius is considered to be non-zero
Returns:
CheckResult with result
'''
return CheckResult(neuron.soma.radius > threshold)
def has_no_jumps(neuron, max_distance=30.0, axis='z'):
'''Check if there are jumps (large movements in the `axis`)
Arguments:
neuron(Neuron): The neuron object to test
        max_distance(float): value above which consecutive values along the
            chosen axis are considered a jump
axis(str): one of x/y/z, which axis to check for jumps
Returns:
        CheckResult with result including list of ids of bad sections
'''
bad_ids = []
axis = {'x': COLS.X, 'y': COLS.Y, 'z': COLS.Z, }[axis.lower()]
for sec in _nf.iter_sections(neuron):
for i, (p0, p1) in enumerate(iter_segments(sec)):
info = (sec.id, i)
if max_distance < abs(p0[axis] - p1[axis]):
bad_ids.append(info)
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_fat_ends(neuron, multiple_of_mean=2.0, final_point_count=5):
'''Check if leaf points are too large
Arguments:
neuron(Neuron): The neuron object to test
multiple_of_mean(float): how many times larger the final radius
has to be compared to the mean of the final points
final_point_count(int): how many points to include in the mean
Returns:
        CheckResult with result including list of ids of bad sections
Note:
A fat end is defined as a leaf segment whose last point is larger
by a factor of `multiple_of_mean` than the mean of the points in
`final_point_count`
'''
bad_ids = []
for leaf in _nf.iter_sections(neuron.neurites, iterator_type=Tree.ileaf):
mean_radius = np.mean(leaf.points[-final_point_count:, COLS.R])
if mean_radius * multiple_of_mean < leaf.points[-1, COLS.R]:
bad_ids.append((leaf.id, len(leaf.points)))
return CheckResult(len(bad_ids) == 0, bad_ids)
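# Minimal usage sketch (illustrative, not part of the check API): given a
# neuron object already loaded elsewhere, run a few of the checks above and
# collect their CheckResult objects.
def _example_run_basic_checks(neuron):
    '''Return a dict of check name -> CheckResult for an already-loaded neuron.'''
    return {
        'has_axon': has_axon(neuron),
        'has_basal_dendrite': has_basal_dendrite(neuron),
        'nonzero_segment_lengths': has_all_nonzero_segment_lengths(neuron),
        'nonzero_soma_radius': has_nonzero_soma_radius(neuron),
    }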
|
|
import json
import configparser
import socket
import threading
import os
import sys
import traceback
from digtemp import read_dig_temp
sys.path.append( "../lib" )
from iseclogger import Logger
from rpiinfo import get_temperature
#remove _dud for production
from rpictrl import addGPIOEvent
from rpictrl import getGPIOInput
from rpictrl import getGPIOOutputStatusVerbose
from rpictrl import initOutputGPIO
import pdb
class PeripheralObject:
serialid = None
devid = None
name = None
description = None
ptype = None
pgpio = None
class LocalPeripherals:
CFG_PERI_SECTION_MAIN="main"
CFG_PERI_SECTION_PERI="peri_{0}"
CFG_PERI_ID = "id"
CFG_PERI_ID_SERIAL = "id_serial"
CFG_PERI_NAME = "name"
CFG_PERI_DESCRIPTION = "description"
CFG_PERI_TYPE="type"
CFG_PERI_COUNT="count"
CFG_PERI_GPIO="gpio"
ST_DEV_NAME = "device_name"
ST_DEV_ID = "device_id"
ST_PERIPHERAL_ID = "peripheral_id"
ST_DEV_SERIAL_ID = "device_serial_id"
ST_DEV_TYPE = "device_type"
ST_DEV_DESC = "device_desc"
filename = "../cfg/peripherals.cfg";
log = None
_peripherals = []
def __init__(self, logger):
self.log = logger
def load(self):
self._peripherals = []
peri_config = configparser.ConfigParser()
peri_config.read(self.filename)
sectionmain = dict(peri_config.items(self.CFG_PERI_SECTION_MAIN))
# sectionmain[self.CFG_PERI_COUNT]
pericount = int(sectionmain[self.CFG_PERI_COUNT]);
for x in range(0,pericount):
sectionkey = self.CFG_PERI_SECTION_PERI.format(x+1);
sectionperi = dict(peri_config.items(sectionkey));
# self.log.write("locperi",sectionperi);
pid = sectionperi[self.CFG_PERI_ID]
pids = sectionperi[self.CFG_PERI_ID_SERIAL]
pname = sectionperi[self.CFG_PERI_NAME]
pdesc = sectionperi[self.CFG_PERI_DESCRIPTION]
ptype = sectionperi[self.CFG_PERI_TYPE]
pGPIO = -1
if(self.CFG_PERI_GPIO in sectionperi):
pGPIO = sectionperi[self.CFG_PERI_GPIO];
self.addDevice(pid, pids, pname,pdesc,ptype,pGPIO);
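    # Illustrative peripherals.cfg layout read by load() above (all values are
    # hypothetical; 'gpio' is optional and defaults to -1 when absent):
    #
    #   [main]
    #   count = 1
    #
    #   [peri_1]
    #   id = 1
    #   id_serial = 0
    #   name = Relay channel 1
    #   description = Saintsmart relay on GPIO 17
    #   type = 1
    #   gpio = 17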
#Output peripherals
PERI_TYPE_OUT_SAINTSMART_RELAY = "1";
PERI_TYPE_OUT_SPEAKER = "2";
PERI_TYPE_OUT_BUZZER = "3";
PERI_TYPE_OUT_CAMERA_CONTROL = "4";
PERI_TYPE_OUT_GRADIAN = "5";
PERI_TYPE_OUT_SERVO = "6";
PERI_TYPE_OUT_PIFACE_RELAY = "7";
#Input peripherals
PERI_TYPE_IN_RFI = "200";
PERI_TYPE_IN_VIDEO_CAMERA = "201";
PERI_TYPE_IN_BUTTON_SWITCH = "202";
PERI_TYPE_IN_HYDROMETER = "203";
PERI_TYPE_IN_THERMOMETER = "204";
PERI_TYPE_IN_PITHERMOMETER = "205";
PERI_TYPE_IN_ACCELEROMETER = "206";
PERI_TYPE_IN_POSITIONING_LOCATION ="207";
PERI_TYPE_IN_AMBIENT_LIGHT = "208";
PERI_TYPE_IN_MICROPHONE = "209";
PERI_TYPE_IN_INFRARED_CAMERA = "210";
PERI_TYPE_IN_MOTION_SENSOR = "211";
PERI_TYPE_IN_GEIGER_COUNTER = "212";
STATUS = "status";
def initSwitchPeripherals(self,switchcallback):
# init peripherals that required it
for po in self._peripherals:
if(po.ptype == self.PERI_TYPE_IN_BUTTON_SWITCH):
addGPIOEvent(po.pgpio, switchcallback);
def initializeGPIOOutputPeripherals(self):
for po in self._peripherals:
if(po.ptype == self.PERI_TYPE_OUT_SAINTSMART_RELAY):
initOutputGPIO(po.pgpio);
def getPeripheralsStatus(self,peripheralcontroller):
retval = [];
for po in self._peripherals:
if(po.ptype == self.PERI_TYPE_OUT_SAINTSMART_RELAY):
#status = peripheralcontroller.getPeripheralStatus(po.serialid)
#stat = {'' + self.ST_PERIPHERAL_ID + '':'' + po.devid
#+ '',''+self.STATUS + '':'' + status+''}
#retval.append(stat)
onoff = getGPIOOutputStatusVerbose(po.pgpio)
                stat = {self.ST_PERIPHERAL_ID: po.devid,
                        self.STATUS: onoff}
retval.append(stat)
if(po.ptype == self.PERI_TYPE_OUT_PIFACE_RELAY):
status = peripheralcontroller.getPIFACEPeripheralStatus(po.serialid)
                stat = {self.ST_PERIPHERAL_ID: po.devid,
                        self.STATUS: status}
retval.append(stat)
elif(po.ptype == self.PERI_TYPE_IN_PITHERMOMETER):
tempc = get_temperature();
                stat = {self.ST_PERIPHERAL_ID: po.devid,
                        self.STATUS: tempc}
retval.append(stat)
elif(po.ptype == self.PERI_TYPE_IN_BUTTON_SWITCH):
onoff = getGPIOInput(po.pgpio)
                stat = {self.ST_PERIPHERAL_ID: po.devid,
                        self.STATUS: onoff}
retval.append(stat)
elif(po.ptype == self.PERI_TYPE_IN_THERMOMETER):
#onoff = getGPIOInput(po.pgpio)
self.log.write("Get Peripheral status","digitemp");
digtemp = read_dig_temp(int(po.serialid));
                stat = {self.ST_PERIPHERAL_ID: po.devid,
                        self.STATUS: digtemp[0]}
self.log.write("Get Peripheral status", stat);
retval.append(stat)
return retval;
def findPeripheral(self, deviceid):
retval = None
for po in self._peripherals:
#print("AAAAAAAA: " + po.devid);
if po.devid == deviceid:
retval = po
break
return retval
def findPeripheralByTypeAndGpio(self, ptype, pgpio):
retval = None
for po in self._peripherals:
if (po.ptype == ptype) and (po.pgpio == pgpio):
retval = po
break
return retval
def getPeripherals(self):
return self._peripherals
def addDevice(self, deviceid, serialid, name, description, ptype, pgpio):
po = PeripheralObject()
po.devid = deviceid
po.serialid = serialid
po.name = name
po.description = description
po.ptype = ptype
po.pgpio = pgpio;
self._peripherals.append(po)
def toJSON(self):
retval = [];
for po in self._peripherals:
#po = self._peripherals[i]
#self.log.write("locperi", po.name + po.devid)
            onePeri = {self.ST_DEV_NAME: po.name,
                       self.ST_DEV_ID: po.devid,
                       self.ST_DEV_SERIAL_ID: po.serialid,
                       self.ST_DEV_TYPE: po.ptype,
                       self.ST_DEV_DESC: po.description}
# self.log.write("locperi", json.dumps(onePeri))
retval.append(onePeri)
return retval #json.dumps(retval)
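# Minimal usage sketch (illustrative only) showing how LocalPeripherals is
# wired together; the no-argument Logger construction is an assumption about
# ../lib/iseclogger:
#
#   log = Logger()
#   peris = LocalPeripherals(log)
#   peris.load()                              # parse ../cfg/peripherals.cfg
#   peris.initializeGPIOOutputPeripherals()   # set up relay output GPIOs
#   print(json.dumps(peris.toJSON()))         # device list as JSON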
|
|
#!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 24 February 2014
# Last update: 17 February 2020
# Location: /data/local/hadkw/HADCRUH2/UPDATE2015/PROGS/HADISDH_BUILD/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in the homogenised monthly mean data from PHA, outputs to ASCII, infilling
# the missing years with missing data indicators (entire missing years are not printed by PHA).
# This code also plots the raw and homogenised station series alongside its raw neighbours with
# the linear trend (median pairwise) shown, for abs and anomaly annual means.
# It can cope with PHA, IDPHA and PHADPD homogenised modes. It doesn't need to be run for IDPHA
# though, nor is it essential to run for q, e, RH or Tw as we don't use the PHA output.
# When run for Td in PHADPD mode it creates homogenised Td from IDPHAt minus PHAdpd and outputs
# a merged log file which attempts to accumulate the changepoints appropriately.
#
# NB: In a few cases Td will not have neighbours to plot so prog will fail. Restart.
#
# Willett et al., 2014
# Willett, K. M., Dunn, R. J. H., Thorne, P. W., Bell, S., de Podesta, M., Parker, D. E., Jones, P. D., and Williams Jr.,
# C. N.: HadISDH land surface multi-variable humidity and temperature record for climate monitoring, Clim. Past, 10,
# 1983-2006, doi:10.5194/cp-10-1983-2014, 2014.
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt:
# import datetime as dt
# import matplotlib.pyplot as plt
# import numpy as np
# from matplotlib.dates import date2num,num2date
# import sys, os
# from scipy.optimize import curve_fit,fsolve,leastsq
# from scipy import pi,sqrt,exp
# from scipy.special import erf
# import scipy.stats
# from math import sqrt,pi
# import struct
# import pdb
#
# Kates:
# from LinearTrends import MedianPairwise - fits linear trend using median pairwise
#
# -----------------------
# DATA
# -----------------------
# # The 40 nearest correlating neighbours from PHA
# CORRFIL='/data/local/hadkw/HADCRUH2/PROGS/PHA2015/pha52jgo/data/hadisdh/7315q/corr/corr.log'
# The raw monthly mean station data
# INRAW='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/ASCII/QABS/'
# The PHA station list to work through
# STATLIST='/data/local/hadkw/HADCRUH2/UPDATE2015/LISTS_DOCS/goodforHadISDH.'+version+'_PHAq_'+thenmon+thenyear+'.txt'
# OR the IDPHA list to work through
# STATLIST='/data/local/hadkw/HADCRUH2/UPDATE2015/LISTS_DOCS/goodforHadISDH.'+version+'_IDPHAq_'+thenmon+thenyear+'.txt'
# Homogenised monthly mean station data from PHA
# INHOM='/data/local/hadkw/HADCRUH2/PROGS/PHA2015/pha52jgo/data/hadisdh/7315q/monthly/WMs.r00/'
# Homogenised monthly mean station data from IDPHA
# INHOM='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/IDPHAASCII/QDIR/'
# For TdL
# IDPHA homogenised monthly mean T for creating Td
# INHOMT='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/IDPHAASCII/TDIR/'
# PHA homogenised monthly mean DPD for creating Td
# INHOMDPD='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/PHAASCII/DPDDIR/'
# Log of changepoint locations and magnitudes and uncertainties for DPD to merge with T breaks
# DPDBREAKFIL='/data/local/hadkw/HADCRUH2/UPDATE2015/LISTS_DOCS/HadISDH.landDPD.'+version+'_PHA_'+thenmon+thenyear+'.log'
# Log of changepoint locations and magnitudes and uncertainties for T to merge with DPD breaks
# TBREAKFIL='/data/local/hadkw/HADCRUH2/UPDATE2015/LISTS_DOCS/HadISDH.landT.'+version+'_IDPHAMG_'+thenmon+thenyear+'.log'
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Go through everything in the 'Start' section to make sure dates, versions and filepaths are up to date
# Choose param settings for the desired variable (also in 'Start' section)
# This can take an hour or so to run through ~3800 stations so consider using screen, screen -d, screen -r
# module load scitools/default-current
# python OutputPHAASCIIPLOT_JAN2015.py
# python2.7 OutputPHAASCIIPLOT_JAN2015.py
#
# NB: In a few cases Td will not have neighbours to plot so prog will fail. Restart.
#
# -----------------------
# OUTPUT
# -----------------------
# # PHA Plot showing raw and homogenised candidate vs raw neighbours with linear trends for abs and anomaly monthly means
# OUTPLOT='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/QDIR/'
# or if IDPHA
# OUTPLOT='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/STAT_PLOTS/IDADJCOMP/QDIR/'
# PHA only: Output monthly mean homogenised ASCII with missing years infilled with missing data indicator
# OUTHOM='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/PHAASCII/QDIR/'
# For Derived Td mode (PHADPD)
# Output log of merged T and DPD changepoints, adjustments, uncertainties that essentially went into Td (indirectly as Td is
# created from T - DPD)
# TDBREAKFIL='/data/local/hadkw/HADCRUH2/UPDATE2015/LISTS_DOCS/HadISDH.landTd.'+version+'_PHADPD_'+thenmon+thenyear+'.log'
# Derived Td is stored as for IDPHA:
# OUTHOM='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/IDPHAASCII/TDDIR/'
# OUTPLOT='/data/local/hadkw/HADCRUH2/UPDATE2015/MONTHLIES/HOMOG/STAT_PLOTS/IDADJCOMP/TDDIR/'
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 3 (17 February 2020)
# ---------
#
# Enhancements
#
# Changes
# Now python 3
#
# Bug fixes
#
#
# Version 2 (25 January 2017)
# ---------
#
# Enhancements
# General tidy up and refinement of changeable variables at the beginning
# Now it should be more straightforward to set up for each year/version/variable and
# clearer to read.
#
# Changes
#
# Bug fixes
# I had got the RAW and HOMOG anomalies the wrong way around for the plotter so homog was red and raw was blue
# Now corrected.
#
#
# Version 1 (29 January 2016)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
# USE python 3 (see 'HOW TO RUN THE CODE' above)
# python OutputPHAASCIIPLOT_JAN2015.py
#
# REQUIRES
# LinearTrends.py
#************************************************************************
# Set up python imports
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os
from scipy.optimize import curve_fit,fsolve,leastsq
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi
import struct
import pdb
from LinearTrends import MedianPairwise
# RESTART VALUE
Restarter='------' #'------' #'681040'
Spin='TRUE' #TRUE: loop through, FALSE: perform one stations only
Plotonly='FALSE' #TRUE or FALSE
AddLetter='a)' #'---'
# Set up initial run choices
styr = 1973
edyr = 2019
param = 'td' # tw, q, e, rh, t, td, dpd
nowmon = 'JAN'
nowyear = '2020'
thenmon = 'JAN'
thenyear = '2020'
version = '4.2.0.2019f'
homogtype = 'PHADPD' #'PHA','IDPHA','PHADPD'
updateyear = str(edyr)[2:4]
workingdir = '/data/users/hadkw/WORKING_HADISDH/UPDATE20'+updateyear
# Set up file locations
STATSUFFIXOUT='_PHAadj.txt'
if param == 'rh':
param2 = 'RH' # Tw, q, e, RH, T, Td, DPD
unit = '%rh' # 'deg C','g/kg','hPa', '%rh'
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHArh_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'rh/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/RHABS/'
STATSUFFIXIN = '_RHmonthQCabs.raw'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'rh/monthly/WMs.r00/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/RHDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/RHDIR/'
elif param == 'q':
param2 = 'q' # Tw, q, e, RH, T, Td, DPD
unit = 'g/kg' # 'deg C','g/kg','hPa', '%rh'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'q/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/QABS/'
STATSUFFIXIN = '_qmonthQCabs.raw'
if homogtype == 'PHA':
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHAq_'+thenmon+thenyear+'.txt'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'q/monthly/WMs.r00/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/QDIR/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/QDIR/'
elif homogtype =='IDPHA':
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_IDPHAq_'+thenmon+thenyear+'.txt'
INHOM = workingdir+'/MONTHLIES/HOMOG/IDPHAASCII/QDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/IDADJCOMP/QDIR/'
elif param == 'tw':
param2 = 'Tw' # Tw, q, e, RH, T, Td, DPD
unit = 'degrees C' # 'deg C','g/kg','hPa', '%rh'
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHAtw_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'tw/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/TWABS/'
STATSUFFIXIN = '_TwmonthQCabs.raw'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'tw/monthly/WMs.r00/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/TWDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/TWDIR/'
elif param == 'e':
param2 = 'e' # Tw, q, e, RH, T, Td, DPD
unit = 'hPa' # 'deg C','g/kg','hPa', '%rh'
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHAe_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'e/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/EABS/'
STATSUFFIXIN = '_emonthQCabs.raw'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'e/monthly/WMs.r00/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/EDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/EDIR/'
elif param == 't':
param2 = 'T' # Tw, q, e, RH, T, Td, DPD
unit = 'degrees C' # 'deg C','g/kg','hPa', '%rh'
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHAt_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'t/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/TABS/'
STATSUFFIXIN = '_TmonthQCabs.raw'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'t/monthly/WMs.r00/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/TDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/TDIR/'
elif param == 'dpd':
    param2 = 'DPD' # Tw, q, e, RH, T, Td, DPD
    unit = 'degrees C' # 'deg C','g/kg','hPa', '%rh'
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHAdpd_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'dpd/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/DPDABS/'
STATSUFFIXIN = '_DPDmonthQCabs.raw'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'dpd/monthly/WMs.r00/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/DPDDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/DPDDIR/'
elif param == 'td':
param2 = 'Td' # Tw, q, e, RH, T, Td, DPD
unit = 'degrees C' # 'deg C','g/kg','hPa', '%rh'
if homogtype == 'PHADPD':
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHADPDtd_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'td/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/TDABS/'
STATSUFFIXIN = '_TdmonthQCabs.raw'
# INHOM='/data/local/hadkw/HADCRUH2/PROGS/PHA2015/pha52jgo/data/hadisdh/hadisdh7313td/monthly/WMs.r00/'
INHOMT = workingdir+'/MONTHLIES/HOMOG/IDPHAASCII/TDIR/'
INHOMDPD = workingdir+'/MONTHLIES/HOMOG/PHAASCII/DPDDIR/'
DPDBREAKFIL = workingdir+'/LISTS_DOCS/HadISDH.landDPD.'+version+'_PHA_'+thenmon+thenyear+'.log'
TBREAKFIL = workingdir+'/LISTS_DOCS/HadISDH.landT.'+version+'_IDPHAMG_'+thenmon+thenyear+'.log'
TDBREAKFIL = workingdir+'/LISTS_DOCS/HadISDH.landTd.'+version+'_PHADPD_'+thenmon+thenyear+'.log'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/IDPHAASCII/TDDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/IDADJCOMP/TDDIR/'
#NB: In a few cases Td will not have neighbours to plot so prog will fail. Restart.
else:
STATLIST = workingdir+'/LISTS_DOCS/goodforHadISDH.'+version+'_PHAtd_'+thenmon+thenyear+'.txt'
CORRFIL = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'td/corr/corr.log'
INRAW = workingdir+'/MONTHLIES/ASCII/derivedTDABS/'
STATSUFFIXIN = '_deTdmonthQCabs.raw'
INHOM = workingdir+'/PROGS/PHA2015/pha52jgo/data/hadisdh/73'+updateyear+'td/monthly/WMs.r00/'
OUTHOM = workingdir+'/MONTHLIES/HOMOG/PHAASCII/TDDIR/'
OUTPLOT = workingdir+'/MONTHLIES/HOMOG/STAT_PLOTS/PHAADJCOMP/TDDIR/'
# Set up variables and arrays needed
mdi=-99.99
DATASTART=dt.datetime(styr,1,1,0,0)
DATAEND=dt.datetime(edyr,12,1,0,0)
clmst=1981
clmed=2010
clmsty=(clmst-styr)
clmedy=(clmed-styr)
clmstm=(clmst-styr)*12
clmedm=((clmed-styr)*12)+11
CLIMSTART=dt.datetime(clmst,1,1,0,0)
CLIMEND=dt.datetime(clmed,12,1,0,0)
nmons=((edyr+1)-styr)*12
monarr=range(nmons)
nyrs=(edyr-styr)+1
yrarr=range(nyrs)
nstations=0 # defined after reading in station list
StationListWMO=[] # nstations list filled after reading in station list
StationListWBAN=[] # nstations list filled after reading in station list
nNstations=0 # defined after reading corr station list
NeighbourList=[] # nNstations list filled after reading in corr station list
MyStation=[] # filled after reading in candidate station
MyRAWStation=[] # filled after reading in candidate station
MyClims=[] # 12 element array of mean months 1981-2010
MyAnomalies=[] # filled with anomalies after subtracting climatology
MyHomogAnoms=[] # filled with homogenised anomalies
MyHomogAbs=[] # filled with climatology+homogenised anomalies
MyClimMeanShift=[] # flat value across complete climatology period that the homogenised values differ from zero by - to rezero anoms and adjust clims/abs
NeighbourStations=[] # nNstations by nmons array filled after reading in all neighbour stations
NeighbourAnomsStations=[] # nNstations by nmons array filled after anomalising all neighbour stations relative to climatology
NeighbourClimsStations=[] # nNstations by nmons array filled after anomalising all neighbour stations relative to climatology
NeighbourDiffStations=[] # nNstations by nmons array filled after creating candidate minus neighbour difference series
MyFile=' ' #string containing file name
#************************************************************************
# Subroutines
#************************************************************************
# READDATA
def ReadData(FileName,typee,delimee):
''' Use numpy genfromtxt reading to read in all rows from a complex array '''
''' Need to specify format as it is complex '''
''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
return np.genfromtxt(FileName, dtype=typee,delimiter=delimee,encoding='latin-1') # ReadData
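#
# Illustrative ReadData call (mirrors the station-list read in the main
# program below): the dtype tuple and the fixed column widths line up field by
# field, and fields are addressed by the default names f0, f1, ...
#
#   types = ("|U6","|U5","float","float","float","|U4","|U30","|U7","int")
#   widths = [6,5,8,10,7,4,30,7,5]
#   stations = ReadData(STATLIST, types, widths)
#   wmo_ids = np.array(stations['f0'])
#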
#************************************************************************
# MERGEADJUSTMENTS
def MergeAdjustments(FileInDPD, FileInT, FileOutTd, StationID, TheMCount):
''' Reads in PHA DPD adjustments and IDPHA T Adjustments '''
''' Sorts them and merges shifts on top of each other '''
''' Outputs DPDPHA in same format as IDPHA '''
nBreaks = 0 # defined after finding and reading in break locs
BreakLocsSt = np.reshape(0,(1)) # nBreaks list of start locations filled after reading in break locs list
BreakLocsEd = np.reshape(0,(1)) # nBreaks list of end locations filled after reading in break locs list
BreakSize = np.reshape(0.,(1)) # nBreaks list of sizes filled after reading in break locs list
BreakUncs = np.reshape(0.,(1)) # nBreaks list of uncertainties filled after reading in break locs list
BreakSources = np.reshape('x',(1)) # nBreaks list of uncertainties filled after reading in break locs list
BreakList = np.zeros((1,4)) # Build this on the fly to equal nBreaks(rows) by rel(adj,unc),act(adj,unc) including last HSP which will be zero
MyBreakLocs = [] # nBreaks+2 month locations for each break including month 1 if needed and last month
# read in the PHA log for DPD
BreakSize,BreakLocsSt,BreakLocsEd,BreakSources,BreakUncs,nBreaks=PHAReadSimple(FileInDPD,StationID,BreakSize,BreakLocsSt,
BreakLocsEd,BreakSources,BreakUncs,nBreaks,
TheMCount)
# read in the IDPHA log for T
BreakSize,BreakLocsSt,BreakLocsEd,BreakSources,BreakUncs,nBreaks=IDPHAReadSimple(FileInT,StationID,BreakSize,BreakLocsSt,
BreakLocsEd,BreakSources,BreakUncs,nBreaks,
TheMCount)
# sort and combine
BreakLocsSt,BreakLocsEd,BreakList,BreakSources,nBreaks=SortBreaksMerge(BreakLocsSt,BreakSize,BreakUncs,
BreakList,BreakSources,nBreaks,nmons)
# write out to file
LogBreakInfoMerge(FileOutTd,StationID,nBreaks,TheMCount,BreakLocsSt,BreakList,BreakSources)
return # MergeAdjustments
#************************************************************************
# PHAREADSIMPLE
def PHAReadSimple(FileName,StationID, all_adjust, all_starts, all_ends, all_sources, all_uncs, breakcount,TheMCount):
'''
Read in PHA results from Adjwrite.txt
StationIDs - list of station IDs
all_adjust - list of adjustment magnitudes
all_starts - list of adjustment date starts
all_ends - list of adjustment date ends
all_sources - list of adjustment source (DPD) in this case
'''
for line in open(FileName):
if "Adj write:"+StationID in line:
print(line)
moo = str.split(line)
tempstring = moo[12]
tempunc = tempstring[0:4]
if breakcount == 0:
### can use np.delete(array,row/column/pointers,axis)###
all_starts[0] = int(moo[4])
all_ends[0] = TheMCount
all_adjust[0] = float(moo[11])
if float(tempunc) > 0. :
all_uncs[0] = float(tempunc) # convert 1.65 sigma to 1 sigma
else:
all_uncs[0] = 0.
all_sources[0] = 'dd'
breakcount = breakcount+1
else:
all_starts = np.append(all_starts,int(moo[4]))
all_ends = np.append(all_ends,int(moo[7])) #int(moo[4]))
all_adjust = np.append(all_adjust,float(moo[11])) # positive adjustments to dewpoint t
if float(tempunc) > 0.:
all_uncs = np.append(all_uncs,float(tempunc))
else:
all_uncs = np.append(all_uncs,0.)
all_sources = np.append(all_sources,'dd')
breakcount = breakcount+1
all_starts[len(all_starts)-1] = 1 #start at 1 because ID will (no intro extra CP)
return all_adjust, all_starts, all_ends, all_sources, all_uncs, breakcount # PHAReadSimple
#************************************************************************
# IDPHAREAD
def IDPHAReadSimple(FileName,StationID, all_adjust, all_starts, all_ends, all_sources, all_uncs, breakcount,TheMCount):
'''
Read in PHA results from Adjwrite.txt
StationIDs - list of station IDs (wmo+wban)
all_adjust - list of adjustment magnitudes
all_starts - list of adjustment date starts
all_ends - list of adjustment date ends
all_sources - list of adjustment source (DPD) in this case
'''
for line in open(FileName):
if StationID in line:
print(line)
moo = str.split(line)
if breakcount == 0:
### can use np.delete(array,row/column/pointers,axis)###
all_starts[0] = int(moo[2])
all_ends[0] = TheMCount
all_adjust[0] = -(float(moo[6])) # negative adjustments to dewpoint t
if float(moo[7]) > 0. :
all_uncs[0] = float(moo[7]) # convert 1.65 sigma to 1 sigma
else:
all_uncs[0] = 0.
all_sources[0] = 't'
breakcount = breakcount+1
else:
all_starts = np.append(all_starts,int(moo[2]))
all_ends = np.append(all_ends,int(moo[3])) #int(moo[4]))
all_adjust = np.append(all_adjust,-(float(moo[6]))) # negative adjustments to dewpoint t
if float(moo[7]) > 0.:
all_uncs = np.append(all_uncs,float(moo[7]))
else:
all_uncs = np.append(all_uncs,0.)
all_sources = np.append(all_sources,'t')
breakcount = breakcount+1
return all_adjust, all_starts, all_ends, all_sources, all_uncs, breakcount # IDPHAReadSimple
#************************************************************************
# SORTBREAKSMERGE
def SortBreaksMerge(TheStarts,TheAdjs,TheUncs,TheBreakList,TheSources,TheBCount,TheMCount):
    ''' Looks at the list of potential breaks from T and DPD '''
''' Sorts them from 1 to 480 (or total) months '''
''' Merges duplicates and those within 12 months of a preceding break '''
''' Merges accumulated adjustment and uncertainty '''
''' resets nBreaks appropriately '''
''' IF DPD inc and T stays the same, Td should dec and vice versa '''
''' IF T inc and DPD stays the same, Td should inc and vice versa '''
''' IF DPD inc and T inc, Td should stay about the same and vice versa '''
''' IF DPD inc and T dec, Td should decrease and vice versa '''
    ''' THIS WILL NOT ALWAYS WORK OUT PERFECTLY BUT IT'S ONLY FOR UNCERTAINTY ESTIMATION '''
SortedInd = np.argsort(TheStarts) # sorts the list BreakLocs indexing from 0
TheStarts = TheStarts[SortedInd]
TheAdjs = TheAdjs[SortedInd]
TheUncs = TheUncs[SortedInd]
TheSources = TheSources[SortedInd]
print(TheStarts)
LastBreakLocSt = TheStarts[0]
NewStarts = np.reshape(TheStarts[0],(1))
NewAdjs = np.reshape(TheAdjs[0],(1))
NewUncs = np.reshape(TheUncs[0],(1))
NewSources = np.reshape(TheSources[0],(1))
derr = 0.
terr = 0.
dadj = 0.
tadj = 0.
if TheSources[0] =='t' :
terr = TheUncs[0]
tadj = TheAdjs[0]
else:
derr = TheUncs[0]
dadj = TheAdjs[0]
realcounter=0
for bb in range(1,TheBCount):
if TheSources[bb] =='t' :
terr = TheUncs[bb]
tadj = TheAdjs[bb]
else:
derr = TheUncs[bb]
dadj = TheAdjs[bb]
        if TheStarts[bb]-LastBreakLocSt > 11: # keep it if it's at least a year apart from any other break
NewStarts = np.append(NewStarts,TheStarts[bb])
NewAdjs = np.append(NewAdjs,tadj+dadj)
NewUncs = np.append(NewUncs,np.sqrt((terr**2) + (derr**2)))
NewSources = np.append(NewSources,TheSources[bb])
LastBreakLocSt = TheStarts[bb]
realcount = realcounter+1
else:
NewAdjs[realcounter-1] = tadj+dadj
NewUncs[realcounter-1] = np.sqrt((terr**2) + (derr**2))
NewSources[realcounter-1] = 'b'
TheBCount = len(NewStarts)
# reverse all of the arrays, sort out ends and independent adjustment/uncertainties
NewStarts = NewStarts[::-1]
NewAdjs = NewAdjs[::-1]
NewUncs = NewUncs[::-1]
NewSources = NewSources[::-1]
NewEnds = np.empty_like(NewStarts)
NewEnds[0] = TheMCount
TheBreakList = np.zeros((TheBCount,4)) # Build this on the fly to equal nBreaks(rows) by rel(adj,unc),act(adj,unc) including last HSP which will be zero
for bb in range(1,TheBCount):
NewEnds[bb] = (NewStarts[bb-1])-1
TheBreakList[bb,0] = NewAdjs[bb]-NewAdjs[bb-1] # this is this funny range thing again needs +1
TheBreakList[bb,1] = np.sqrt((NewUncs[bb]**2)-(NewUncs[bb-1]**2))
TheBreakList[bb,2] = NewAdjs[bb] # minus or not minus?
TheBreakList[bb,3] = NewUncs[bb]
print(TheBCount,NewStarts)
return NewStarts,NewEnds,TheBreakList,NewSources,TheBCount #SortBreaksMerge
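#
# Worked example of the merge rule above (illustrative months and values):
# breaks at months 120 and 125 are under 12 months apart, so they collapse
# into one changepoint whose adjustment is the accumulated tadj+dadj and whose
# uncertainty combines in quadrature as sqrt(terr**2 + derr**2); a further
# break at month 140 is far enough away to be kept as a separate changepoint.
#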
#************************************************************************
# LOGBREAKINFOMERGE
def LogBreakInfoMerge(TheFile,TheStationID,TheBCount,TheMonthCount,TheBreakLocsSt,TheBreakList,TheSources):
''' Print out a list of breaks found with their location, size and uncertainty '''
''' Append to file '''
''' IN ALL CASES ADJUSTMENTS ARE -(adj) TO MATCH PHA OUTPUT '''
''' IF THE DATA HAVE BEEN ADJUSTED DOWN THEN THE ADJUSTMENT GIVEN IS POSITIVE - WEIRD '''
filee = open(TheFile,'a+')
if TheBCount == 1:
filee.write('%11s %2s %3i %3i %6.2f %6.2f %6.2f %6.2f \n' % (TheStationID,1,1,
TheMonthCount,TheBreakList[0,0],TheBreakList[0,1],TheBreakList[0,2],TheBreakList[0,3]))
else:
LocEnd=TheMonthCount
# Force first location of TheBreakLocs to be 0 instead of 1 so that a single line of code works
for b in range(0,TheBCount):
print(TheBCount,b)
# sign swapping of adjustments for consistency with PHA logs
filee.write('%11s %2s %3i %3i %6.2f %6.2f %6.2f %6.2f %2s\n' % (TheStationID,TheBCount-b,TheBreakLocsSt[b],
LocEnd,-(TheBreakList[b,0]),TheBreakList[b,1],-(TheBreakList[b,2]),TheBreakList[b,3],TheSources[b]))
LocEnd = (TheBreakLocsSt[b]-1)
filee.close()
return #LogBreakInfoMerge
#************************************************************************
# FINDNEIGHBOURS
def FindNeighbours(FileName,CandID,neighbourcount,neighbourlist):
''' open the corr file and find the line beginning with the candidate station '''
''' list all neighbouring stations up to 40'''
''' be sure not to count 0s'''
''' return neighbour count and neighbour list '''
for line in open(FileName):
neighbourlist = [] # make sure its blank to start
neighbourlist = str.split(line) # makes a list
if neighbourlist[0] == CandID: # found the line
neighbourcount = len(neighbourlist) # this doesn't include the zeros but does include the candidate in the count.
break # don't waste time, exit the loop
return neighbourcount,neighbourlist # FindNeighbours
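#
# For reference, each corr.log line scanned above is whitespace separated: the
# first token is the candidate station id and the remaining tokens are its
# neighbour ids (up to 40, per the docstring). Illustrative line:
#
#   01001099999 01008099999 01025099999 01047099999
#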
#************************************************************************
# READINNETWORKS
def ReadInNetworks(TheCount,TheList,TheCStation,TheFilebitA,TheFilebitB,TheYears,TheData):
''' Loop through all neighbour station raw files '''
''' IGNORE FIRST FILE AS THIS IS THE CANDIDATE STATION '''
''' DOUBLE CHECK ALL OTHER STATIONS ARE NOT CANDIDATE AS THIS IS A KNOWN PROBLEM '''
''' read in using ReadStations and add to array '''
TheNewCount = 0 # setting up new variables to output
TheNewList = []
TheData = np.array(TheData) # was an empty list
for n,TheNStation in enumerate(TheList[1:]): # 1: starts at second element
if TheNStation == TheCStation:
continue
TheFile = TheFilebitA+TheNStation[0:6]+'-'+TheNStation[6:11]+TheFilebitB
TempStation = []
TheTypes = np.append("|S12",["int"]*13)
TheDelimiters = np.append([12,4,6],[9]*11)
RawData = ReadData(TheFile,TheTypes,TheDelimiters)
for yy in TheYears:
moo = list(RawData[yy])
if yy == 0:
TempStation = moo[2:14]
else:
TempStation = np.append(TempStation,moo[2:14]) # for some silly reason you subscript starting from 0th element to the nth rather than n-1th element
if TheData.size: # if empty array then use first element, otherwise append
TheData = np.append(TheData,np.reshape(TempStation/100.,(1,len(TempStation))),axis=0) # now in proper units, fill the Neighbour array
else:
TheData = np.reshape(TempStation/100.,(1,len(TempStation)))
if any(TheNewList): # if empty array then use first element, otherwise append
TheNewList = np.append(TheNewList,TheNStation)
else:
TheNewList = [TheNStation]
    TheNewCount = len(TheNewList) # Now this only includes the neighbours, not the candidate (unlike FindNeighbours)
return TheData,TheNewList,TheNewCount #ReadInNetworks
#************************************************************************
# MAKEANOMALIES
def MakeAnomalies(TheData,TheAnomalies,TheClims,TheYCount,TheStClim,TheEdClim,TheMDI):
''' Working on both 1D and 2D (multiple station) arrays '''
''' Use given climatology period to create monthly clims and anomalies '''
sizoo = TheData.shape # returns a tuple of rows,columns
TheClims = np.empty((sizoo[0],12)) # initialise clims array for nstations (rows) by 12 months (columns)
TheClims.fill(TheMDI)
TheAnomalies = np.empty(sizoo)
TheAnomalies.fill(TheMDI)
for t,TempStation in enumerate(TheData): # row by row so ok as long as each station is a row
#print(t,len(TempStation))
Mooch = np.reshape(TempStation,(TheYCount,12)) # years(rows) by months(columns)
Mooch2 = np.empty_like(Mooch) # To make sure I don't overwrite the absolute data
Mooch2.fill(TheMDI)
for mm in range(12):
subarr = Mooch[TheStClim:TheEdClim+1,mm]
#print(mm,subarr)
gots = (subarr > TheMDI)
if len(subarr[gots]) >= 15: # more sophisticated checking has been done previously
TheClims[t,mm] = np.mean(subarr[gots])
gots2 = (Mooch[:,mm] > TheMDI)
Mooch2[gots2,mm] = Mooch[gots2,mm]-TheClims[t,mm]
#print " %6.2f"*40 % tuple(Mooch[:,mm])
TheAnomalies[t,] = np.reshape(Mooch2,(1,12*TheYCount))
return TheAnomalies,TheClims #MakeAnomalies
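#
# Worked example of the anomaly step above (illustrative numbers): for one
# station and one calendar month, if the 1981-2010 window holds at least 15
# non-missing values and their mean is 12.3, the climatology for that month is
# 12.3 and every non-missing value in that month's column becomes value - 12.3,
# i.e. an anomaly relative to the station's own climatology.
#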
#************************************************************************
# WRITEOUT
def WriteOut(TheData,TheFile,TheYears,TheStYr,TheStationID):
''' Use numpy array to reform to years by months (row/column)'''
''' Output lines to text of StationID, space, year, 12 months of data*100 (i6,x)'''
TheData = np.reshape(TheData,(-1,12)) # an nyears by 12 months array
for outt in TheYears:
for mm in range(12):
if mm == 0:
moo = [np.char.mod("%6i",int(TheData[outt,mm]*100.))," "]
else:
moo = moo+[np.char.mod("%6i",int(TheData[outt,mm]*100.))," "] # list of silly months with spaces between
if outt == 0:
goo = [TheStationID," ",TheYears[outt]+TheStYr]+moo
else:
goo = np.vstack((goo,[TheStationID," ",TheYears[outt]+TheStYr]+moo))
# NEED TO MAKE A 2D STRING ARRAY - seems very long winded to me!
np.savetxt(TheFile,goo,fmt='%s',delimiter='')
return #WriteOut
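#
# Example of one line written above (illustrative id and values): station id,
# a space, the year, then twelve monthly values scaled by 100 in i6 fields,
# with -9999 standing for the scaled missing data indicator:
#
#   01001099999 1973   1201   1187  -9999 ...
#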
#************************************************************************
# PLOTHOMOGTS
def PlotHomogTS(TheFile,TheStation,TheNeighbours,TheHStation,TheNCount,TheMDI,TheStYr,TheYCount,unit,typee,Letteree):
''' Plot raw candidate and neighbours with homogenised candidate '''
''' Add medianpairwise trends - from code medianpairwise.py '''
'''MAKE MEDIANPAIRWISE.PY and COMPLETE WHEN HOMOG SERIES IS DONE '''
# create annual averages and years and titles
TheStationAnn = np.empty(TheYCount)
TheStationAnn.fill(TheMDI)
TheHStationAnn = np.empty(TheYCount)
TheHStationAnn.fill(TheMDI)
if TheNCount > 1:
TheNeighboursAnn = np.empty((len(TheNeighbours[:,0]),TheYCount))
TheNeighboursAnn.fill(TheMDI)
TheStation = np.reshape(TheStation,(TheYCount,12))
TheHStation = np.reshape(TheHStation,(TheYCount,12))
for yy in range(TheYCount):
if np.sum(TheStation[yy,] != TheMDI) >= 9:
TheStationAnn[yy] = np.mean(TheStation[yy,np.where(TheStation[yy,] != TheMDI)])
if np.sum(TheHStation[yy,] != TheMDI) >= 9:
TheHStationAnn[yy] = np.mean(TheHStation[yy,np.where(TheHStation[yy,] != TheMDI)])
TheStation = np.reshape(TheStation,(TheYCount*12))
TheHStation = np.reshape(TheHStation,(TheYCount*12))
if TheNCount > 1:
for n,Neighbour in enumerate(TheNeighbours):
Neighbour = np.reshape(Neighbour,(TheYCount,12))
for yy in range(TheYCount):
if np.sum(Neighbour[yy,] != TheMDI) >= 9:
TheNeighboursAnn[n,yy] = np.mean(Neighbour[yy,np.where(Neighbour[yy,] != TheMDI)])
TheYears = np.reshape(range(TheStYr,TheStYr+TheYCount),TheYCount)
ytitlee = typee+' ('+unit+')'
xtitlee = 'Years'
    # get decadal trends and 5th-95th confidence range
rawtrend = [0.,0.,0.]
homtrend = [0.,0.,0.]
rawtrend = MedianPairwise(TheStationAnn,TheMDI,rawtrend)
homtrend = MedianPairwise(TheHStationAnn,TheMDI,homtrend)
# set up plot
plt.clf()
plt.figure(1,figsize=(8,4))
plt.axes([0.1,0.1,0.85,0.80])
if TheNCount > 1:
PileItUp = np.append(TheNeighboursAnn,np.append(np.reshape(TheStationAnn,(1,TheYCount)),
np.reshape(TheHStationAnn,(1,TheYCount)),axis=0),axis=0)
else:
PileItUp = np.append(np.reshape(TheStationAnn,(1,TheYCount)),
np.reshape(TheHStationAnn,(1,TheYCount)),axis=0)
plt.ylim([np.floor(min(PileItUp[PileItUp != TheMDI]))-2,
np.ceil(max(PileItUp[PileItUp != TheMDI]))+2])
plt.xlim([TheStYr,TheStYr+TheYCount])
plt.tick_params(axis='both', which='major', labelsize=16)
if TheNCount > 1:
for n,Neighbour in enumerate(TheNeighboursAnn):
line, = plt.plot(TheYears[np.where(Neighbour > TheMDI)],Neighbour[np.where(Neighbour > TheMDI)],color='black',linewidth=0.25)
line, = plt.plot(TheYears[np.where(TheStationAnn > TheMDI)],TheStationAnn[np.where(TheStationAnn > TheMDI)],'r',linewidth=2)
line, = plt.plot(TheYears[np.where(TheHStationAnn > TheMDI)],TheHStationAnn[np.where(TheHStationAnn > TheMDI)],'b',linewidth=2)
if typee=='anomalies':
line, = plt.plot(np.append(TheYears,TheStYr+TheYCount+1),np.zeros(TheYCount+1),'black',linewidth=1)
plt.xlabel(xtitlee,size=16)
plt.ylabel(ytitlee,size=16)
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
rawstr = "%5.2f +/- %5.2f to %5.2f %s /decade " % (rawtrend[0]*10,rawtrend[1]*10,rawtrend[2]*10,unit)
homstr = "%5.2f +/- %5.2f to %5.2f %s /decade " % (homtrend[0]*10,homtrend[1]*10,homtrend[2]*10,unit)
plt.figtext(0.1,0.84,rawstr,color='r',size=16)
plt.figtext(0.1,0.78,homstr,color='b',size=16)
if Letteree != '---':
plt.figtext(0.05,0.95,Letteree,color='Black',size=18)
#plt.show()
plt.savefig(TheFile+".eps")
plt.savefig(TheFile+".png")
return #PlotHomogTS
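# Note on the annual averaging above (clarifying comment, not original code):
# an annual mean is only formed when at least 9 of the 12 months in a year are
# valid (!= TheMDI); years with fewer valid months keep the missing-data
# indicator and are simply skipped by the plotting and trend code, e.g.
#   8 valid months  -> TheStationAnn[yy] stays at TheMDI
#   10 valid months -> TheStationAnn[yy] = mean of the 10 valid values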
#***********************************************************************
# MAIN PROGRAM
#***********************************************************************
# read in station list
MyTypes = ("|U6","|U5","float","float","float","|U4","|U30","|U7","int")
#MyTypes = ("|S6","|S5","float","float","float","|S4","|S30","|S7","int")
MyDelimiters = [6,5,8,10,7,4,30,7,5]
RawData = ReadData(STATLIST,MyTypes,MyDelimiters)
StationListWMO = np.array(RawData['f0'])
StationListWBAN = np.array(RawData['f1'])
StationListLat = np.array(RawData['f2'])
StationListLon = np.array(RawData['f3'])
StationListElev = np.array(RawData['f4'])
StationListCID = np.array(RawData['f5'])
StationListName = np.array(RawData['f6'])
nstations = len(StationListWMO)
# loop through station by station
for st in range(nstations):
# check if restart necessary
if Restarter != '------' and Restarter != StationListWMO[st]:
continue
Restarter = '------'
# set up clean arrays and variables
nNstations = 0 # defined after reading corr station list
NeighbourList = [] # nNstations list filled after reading in corr station list
MyStation = np.zeros((nyrs,12)) # filled after reading in candidate station
MyStation[:,:] = (-9999)
MyTStation = []
MyDPDStation = []
MyRAWStation = []
MyClims = [] # 12 element array of mean months 1981-2010
MyAnomalies = [] # filled with anomalies after subtracting climatology
MyHomogAnoms = [] # filled with homogenised anomalies
MyHomogAbs = [] # filled with climatology+homogenised anomalies
MyClimMeanShift = [] # flat value across complete climatology period that the homogenised values differ from zero by - to rezero anoms and adjust clims/abs
NeighbourStations = [] # nNstations by nmons array filled after reading in all neighbour stations
NeighbourAnomsStations = [] # nNstations by nmons array filled after anomalising all neighbour stations relative to climatology
NeighbourClimsStations = [] # nNstations by nmons array filled after anomalising all neighbour stations relative to climatology
NeighbourDiffStations = [] # nNstations by nmons array filled after creating candidate minus neighbour difference series
# read in the RAW station file
MyFile = INRAW+StationListWMO[st]+"-"+StationListWBAN[st]+STATSUFFIXIN
MyTypes=np.append("|S12",["int"]*13)
MyDelimiters = np.append([12,4,6],[9]*11)
RawData = ReadData(MyFile,MyTypes,MyDelimiters)
for yy in yrarr:
moo = list(RawData[yy])
if yy == 0:
MyRAWStation = moo[2:14]
else:
MyRAWStation = np.append(MyRAWStation,moo[2:14]) # for some silly reason you subscript starting from 0th element to the nth rather than n-1th element
print(st,MyFile)
MyRAWStation = np.reshape(MyRAWStation/100.,(1,nmons)) # now in proper units and an array not list
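# (assumed encoding, consistent with the WriteOut docstring: the raw files store
#  hundredths of the unit as integers, so a stored 1523 becomes 15.23 after /100.)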
# read in the PHA HOMOGENISED station file
if homogtype == 'PHA':
MyFile = INHOM+StationListWMO[st]+StationListWBAN[st]+".WMs.r00.tavg"
MyTypes = np.append(["|S16","|S6"],["|S9"]*11)
MyDelimiters = np.append([16,6],[9]*11)
RawData = ReadData(MyFile,MyTypes,MyDelimiters)
for yy in range(0,len(RawData)):
# get the year
moo = list(RawData[yy])
mystring = moo[0]
ypoint = int(mystring[12:16])-styr
# get the non'd' bits of the strings
newmoo = [int(a[-5:]) for a in moo[1:13]]
# print("NEWMOO",newmoo)
MyStation[ypoint] = newmoo
print(st,MyFile)
MyStation = np.reshape(MyStation/100.,(1,nmons)) # now in proper units and an array not list
elif homogtype == 'PHADPD':
MyFile = INHOMT+StationListWMO[st]+StationListWBAN[st]+'_IDPHAadj.txt'
MyTypes = np.append("|S16",["int"]*12)
MyDelimiters = np.append([16,6],[7]*11)
RawData = ReadData(MyFile,MyTypes,MyDelimiters)
for yy in yrarr:
moo = list(RawData[yy])
if yy == 0:
MyTStation = moo[1:13]
else:
MyTStation = np.append(MyTStation,moo[1:13]) # for some silly reason you subscript starting from 0th element to the nth rather than n-1th element
print(st,MyFile)
MyTStation = np.reshape(MyTStation/100.,(1,nmons)) # now in proper units and an array not list
MyFile = INHOMDPD+StationListWMO[st]+StationListWBAN[st]+'_PHAadj.txt'
MyTypes = np.append("|S16",["int"]*12)
MyDelimiters = np.append([16,6],[7]*11)
RawData = ReadData(MyFile,MyTypes,MyDelimiters)
for yy in yrarr:
moo=list(RawData[yy])
if yy == 0:
MyDPDStation = moo[1:13]
else:
MyDPDStation = np.append(MyDPDStation,moo[1:13]) # for some silly reason you subscript starting from 0th element to the nth rather than n-1th element
print(st,MyFile)
MyDPDStation = np.reshape(MyDPDStation/100.,(1,nmons)) # now in proper units and an array not list
# create Td from T-DPD where data exist
MyStation = np.empty_like(MyTStation)
MyStation[:,:] = (-99.99)
for mm in range(len(MyStation[0,:])):
if MyTStation[0,mm] > mdi and MyDPDStation[0,mm] > mdi:
MyStation[0,mm] = MyTStation[0,mm]-MyDPDStation[0,mm]
# ALSO FAFF AROUND READING IN ADJUSTMENT FILES AND MERGING
MergeAdjustments(DPDBREAKFIL,TBREAKFIL,TDBREAKFIL,StationListWMO[st]+StationListWBAN[st],nmons)
elif homogtype == 'IDPHA':
MyFile = INHOM+StationListWMO[st]+StationListWBAN[st]+'_IDPHAadj.txt'
MyTypes = np.append("|S16",["int"]*12)
MyDelimiters = np.append([16,6],[7]*11)
RawData = ReadData(MyFile,MyTypes,MyDelimiters)
for yy in yrarr:
moo = list(RawData[yy])
if yy == 0:
MyStation = moo[1:13]
else:
MyStation = np.append(MyStation,moo[1:13]) # for some silly reason you subscript starting from 0th element to the nth rather than n-1th element
print(st,MyFile)
MyStation = np.reshape(MyStation/100.,(1,nmons)) # now in proper units and an array not list
nNstations,NeighbourList = FindNeighbours(CORRFIL,StationListWMO[st]+StationListWBAN[st],nNstations,
NeighbourList)
print("No. of Neighbours: ",nNstations-1) # not including candidate but may have duplicate
if nNstations > 1:
# read in the neighbour files - if insufficient then list in bad stations list
NeighbourStations,NeighbourList,nNstations=ReadInNetworks(nNstations,NeighbourList,
StationListWMO[st]+StationListWBAN[st],INRAW,
STATSUFFIXIN,yrarr,NeighbourStations)
print("Actual No. of Neighbours: ",nNstations) # not including candidate but may have duplicate
# convert all to anomalies (storing station climatology)
MyAnomalies,MyClims = MakeAnomalies(MyRAWStation,MyAnomalies,MyClims,nyrs,clmsty,clmedy,mdi)
MyHomogAnoms,MyClims = MakeAnomalies(MyStation,MyHomogAnoms,MyClims,nyrs,clmsty,clmedy,mdi)
NeighbourAnomsStations,NeighbourClimsStations = MakeAnomalies(NeighbourStations,NeighbourAnomsStations,
NeighbourClimsStations,nyrs,clmsty,clmedy,mdi)
# PLOT CANDIDATE AND NEIGHBOURS UNHOMOG WITH HOMOG ON TOP - ABS, ANOMS with MedianPairwiseTrends
# REZEROD HOMOG MAY MEAN ITS NOW OFFSET COMPARED TO ORIGINAL
MyPlotFile = OUTPLOT+StationListWMO[st]+StationListWBAN[st]+'_trendcomp_'+param+'_'+nowmon+nowyear+'abs'
PlotHomogTS(MyPlotFile,MyRAWStation,NeighbourStations,MyStation,nNstations,mdi,styr,nyrs,unit,'absolutes',AddLetter)
MyPlotFile = OUTPLOT+StationListWMO[st]+StationListWBAN[st]+'_trendcomp_'+param+'_'+nowmon+nowyear+'anoms'
PlotHomogTS(MyPlotFile,MyAnomalies,NeighbourAnomsStations,MyHomogAnoms,nNstations,mdi,styr,nyrs,unit,'anomalies',AddLetter)
# print out homogenised station anomalies
if Plotonly == 'FALSE':
MyFileOut = OUTHOM+StationListWMO[st]+StationListWBAN[st]+STATSUFFIXOUT
WriteOut(MyStation,MyFileOut,yrarr,styr,StationListWMO[st]+StationListWBAN[st])
if Spin == 'FALSE':
break
# end loop of stations
# pdb.set_trace()
print("And, we are done!")
|
|
"""
:codeauthor: Gareth J. Greenaway <[email protected]>
"""
import logging
import pytest
import salt.config
import salt.loader
import salt.modules.beacons as beaconmod
import salt.states.beacon as beaconstate
import salt.states.service as service
import salt.utils.path
import salt.utils.platform
from salt.utils.event import SaltEvent
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
def func(name):
"""
Mock func method
"""
return name
@pytest.fixture
def configure_loader_modules():
return {
service: {
"__env__": "base",
"__salt__": {},
"__opts__": {"test": False, "cachedir": ""},
"__instance_id__": "",
"__low__": {},
"__utils__": {},
"__context__": {},
},
beaconstate: {"__salt__": {}, "__opts__": {}},
beaconmod: {"__salt__": {}, "__opts__": {}},
}
def test_get_systemd_only():
"""
Test the _get_systemd_only function
"""
def test_func(cats, dogs, no_block):
pass
with patch.object(service._get_systemd_only, "HAS_SYSTEMD", True, create=True):
ret, warnings = service._get_systemd_only(
test_func, {"cats": 1, "no_block": 2, "unmask": 3}
)
assert len(warnings) == 0
assert ret == {"no_block": 2}
ret, warnings = service._get_systemd_only(test_func, {"cats": 1, "unmask": 3})
assert len(warnings) == 0
assert ret == {}
def test_get_systemd_only_platform():
"""
Test the _get_systemd_only function on unsupported platforms
"""
def test_func(cats, dogs, no_block):
pass
with patch.object(service._get_systemd_only, "HAS_SYSTEMD", False, create=True):
ret, warnings = service._get_systemd_only(
test_func, {"cats": 1, "no_block": 2, "unmask": 3}
)
assert warnings == ["The 'no_block' argument is not supported by this platform"]
assert ret == {}
ret, warnings = service._get_systemd_only(test_func, {"cats": 1, "unmask": 3})
assert len(warnings) == 0
assert ret == {}
def test_get_systemd_only_no_mock():
"""
Test the _get_systemd_only function without mocking
"""
def test_func(cats, dogs, no_block):
pass
ret, warnings = service._get_systemd_only(
test_func, {"cats": 1, "no_block": 2, "unmask": 3}
)
assert isinstance(ret, dict)
assert isinstance(warnings, list)
def test_running():
"""
Test to verify that the service is running
"""
ret = [
{"comment": "", "changes": {}, "name": "salt", "result": True},
{
"changes": {},
"comment": "The service salt is already running",
"name": "salt",
"result": True,
},
{
"changes": "saltstack",
"comment": "The service salt is already running",
"name": "salt",
"result": True,
},
{
"changes": {},
"comment": "Service salt is set to start",
"name": "salt",
"result": None,
},
{
"changes": "saltstack",
"comment": "Started service salt",
"name": "salt",
"result": True,
},
{
"changes": {},
"comment": "The service salt is already running",
"name": "salt",
"result": True,
},
{
"changes": "saltstack",
"comment": "Service salt failed to start",
"name": "salt",
"result": False,
},
{
"changes": "saltstack",
"comment": (
"Started service salt\nService masking not available on this minion"
),
"name": "salt",
"result": True,
},
{
"changes": "saltstack",
"comment": (
"Started service salt\nService masking not available on this minion"
),
"name": "salt",
"result": True,
},
{
"changes": {},
"comment": (
"The service salt is disabled but enable is not True. Set enable to"
" True to successfully start the service."
),
"name": "salt",
"result": False,
},
{
"changes": {},
"comment": "The service salt is set to restart",
"name": "salt",
"result": None,
},
]
tmock = MagicMock(return_value=True)
fmock = MagicMock(return_value=False)
vmock = MagicMock(return_value="salt")
with patch.object(service, "_enabled_used_error", vmock):
assert service.running("salt", enabled=1) == "salt"
with patch.object(service, "_available", fmock):
assert service.running("salt") == ret[0]
with patch.object(service, "_available", tmock):
with patch.dict(service.__opts__, {"test": False}):
with patch.dict(
service.__salt__,
{"service.enabled": tmock, "service.status": tmock},
):
assert service.running("salt") == ret[1]
mock = MagicMock(return_value={"changes": "saltstack"})
with patch.dict(
service.__salt__,
{
"service.enabled": MagicMock(side_effect=[False, True]),
"service.status": tmock,
},
):
with patch.object(service, "_enable", mock):
assert service.running("salt", True) == ret[2]
with patch.dict(
service.__salt__,
{
"service.enabled": MagicMock(side_effect=[True, False]),
"service.status": tmock,
},
):
with patch.object(service, "_disable", mock):
assert service.running("salt", False) == ret[2]
with patch.dict(
service.__salt__,
{
"service.status": MagicMock(side_effect=[False, True]),
"service.enabled": MagicMock(side_effect=[False, True]),
"service.start": MagicMock(return_value="stack"),
},
):
with patch.object(
service,
"_enable",
MagicMock(return_value={"changes": "saltstack"}),
):
assert service.running("salt", True) == ret[4]
with patch.dict(
service.__salt__,
{
"service.status": MagicMock(side_effect=[False, True]),
"service.enabled": MagicMock(side_effect=[False, True]),
"service.unmask": MagicMock(side_effect=[False, True]),
"service.start": MagicMock(return_value="stack"),
},
):
with patch.object(
service,
"_enable",
MagicMock(return_value={"changes": "saltstack"}),
):
assert service.running("salt", True, unmask=True) == ret[7]
with patch.dict(service.__opts__, {"test": True}):
with patch.dict(service.__salt__, {"service.status": tmock}):
assert service.running("salt") == ret[5]
with patch.dict(service.__salt__, {"service.status": fmock}):
assert service.running("salt") == ret[3]
with patch.dict(service.__opts__, {"test": False}):
with patch.dict(
service.__salt__,
{
"service.status": MagicMock(side_effect=[False, False]),
"service.enabled": MagicMock(side_effect=[True, True]),
"service.start": MagicMock(return_value="stack"),
},
):
with patch.object(
service,
"_enable",
MagicMock(return_value={"changes": "saltstack"}),
):
assert service.running("salt", True) == ret[6]
# test some unique cases simulating Windows
with patch.object(salt.utils.platform, "is_windows", tmock):
# We should fail if a service is disabled on Windows and enable
# isn't set.
with patch.dict(
service.__salt__,
{
"service.status": fmock,
"service.enabled": fmock,
"service.start": tmock,
},
):
assert service.running("salt", None) == ret[9]
assert service.__context__ == {"service.state": "running"}
# test some unique cases simulating macOS
with patch.object(salt.utils.platform, "is_darwin", tmock):
# We should fail if a service is disabled on macOS and enable
# isn't set.
with patch.dict(
service.__salt__,
{
"service.status": fmock,
"service.enabled": fmock,
"service.start": tmock,
},
):
assert service.running("salt", None) == ret[9]
assert service.__context__ == {"service.state": "running"}
# test enabling a service prior starting it on macOS
with patch.dict(
service.__salt__,
{
"service.status": MagicMock(side_effect=[False, "loaded"]),
"service.enabled": MagicMock(side_effect=[False, True]),
"service.start": tmock,
},
):
with patch.object(
service,
"_enable",
MagicMock(return_value={"changes": "saltstack"}),
):
assert service.running("salt", True) == ret[4]
assert service.__context__ == {"service.state": "running"}
# if an enable attempt fails on macOS or windows then a
# disabled service will always fail to start.
with patch.dict(
service.__salt__,
{
"service.status": fmock,
"service.enabled": fmock,
"service.start": fmock,
},
):
with patch.object(
service,
"_enable",
MagicMock(
return_value={"changes": "saltstack", "result": False}
),
):
assert service.running("salt", True) == ret[6]
assert service.__context__ == {"service.state": "running"}
def test_running_in_offline_mode():
"""
Tests the case in which a service.running state is executed in an offline environment
"""
name = "thisisnotarealservice"
with patch.object(service, "_offline", MagicMock(return_value=True)):
ret = service.running(name=name)
assert ret == {
"changes": {},
"comment": "Running in OFFLINE mode. Nothing to do",
"result": True,
"name": name,
}
def test_dead():
"""
Test to ensure that the named service is dead
"""
ret = [
{"changes": {}, "comment": "", "name": "salt", "result": True},
{
"changes": "saltstack",
"comment": "The service salt is already dead",
"name": "salt",
"result": True,
},
{
"changes": {},
"comment": "Service salt is set to be killed",
"name": "salt",
"result": None,
},
{
"changes": "saltstack",
"comment": "Service salt was killed",
"name": "salt",
"result": True,
},
{
"changes": {},
"comment": "Service salt failed to die",
"name": "salt",
"result": False,
},
{
"changes": "saltstack",
"comment": "The service salt is already dead",
"name": "salt",
"result": True,
},
]
info_mock = MagicMock(return_value={"StartType": ""})
mock = MagicMock(return_value="salt")
with patch.object(service, "_enabled_used_error", mock):
assert service.dead("salt", enabled=1) == "salt"
tmock = MagicMock(return_value=True)
fmock = MagicMock(return_value=False)
with patch.object(service, "_available", fmock):
assert service.dead("salt") == ret[0]
with patch.object(service, "_available", tmock):
mock = MagicMock(return_value={"changes": "saltstack"})
with patch.dict(service.__opts__, {"test": True}):
with patch.dict(
service.__salt__,
{
"service.enabled": fmock,
"service.stop": tmock,
"service.status": fmock,
"service.info": info_mock,
},
):
with patch.object(service, "_enable", mock):
assert service.dead("salt", True) == ret[5]
with patch.dict(
service.__salt__,
{
"service.enabled": tmock,
"service.status": tmock,
"service.info": info_mock,
},
):
assert service.dead("salt") == ret[2]
with patch.dict(service.__opts__, {"test": False}):
with patch.dict(
service.__salt__,
{
"service.enabled": fmock,
"service.stop": tmock,
"service.status": fmock,
"service.info": info_mock,
},
):
with patch.object(service, "_enable", mock):
assert service.dead("salt", True) == ret[1]
with patch.dict(
service.__salt__,
{
"service.enabled": MagicMock(side_effect=[True, True, False]),
"service.status": MagicMock(side_effect=[True, False, False]),
"service.stop": MagicMock(return_value="stack"),
"service.info": info_mock,
},
):
with patch.object(
service,
"_enable",
MagicMock(return_value={"changes": "saltstack"}),
):
assert service.dead("salt", True) == ret[3]
# test an initd which has a wrong status (True even if dead)
with patch.dict(
service.__salt__,
{
"service.enabled": MagicMock(side_effect=[False, False, False]),
"service.status": MagicMock(side_effect=[True, True, True]),
"service.stop": MagicMock(return_value="stack"),
"service.info": info_mock,
},
):
with patch.object(service, "_disable", MagicMock(return_value={})):
assert service.dead("salt", False) == ret[4]
assert service.__context__ == {"service.state": "dead"}
def test_dead_with_missing_service():
"""
Tests the case in which a service.dead state is executed on a service
which does not exist.
See https://github.com/saltstack/salt/issues/37511
"""
name = "thisisnotarealservice"
with patch.dict(
service.__salt__, {"service.available": MagicMock(return_value=False)}
):
ret = service.dead(name=name)
assert ret == {
"changes": {},
"comment": "The named service {} is not available".format(name),
"result": True,
"name": name,
}
def test_dead_in_offline_mode():
"""
Tests the case in which a service.dead state is executed in an offline environment
"""
name = "thisisnotarealservice"
with patch.object(service, "_offline", MagicMock(return_value=True)):
ret = service.dead(name=name)
assert ret == {
"changes": {},
"comment": "Running in OFFLINE mode. Nothing to do",
"result": True,
"name": name,
}
def test_enabled():
"""
Test to verify that the service is enabled
"""
ret = {"changes": "saltstack", "comment": "", "name": "salt", "result": True}
mock = MagicMock(return_value={"changes": "saltstack"})
with patch.object(service, "_enable", mock):
assert service.enabled("salt") == ret
assert service.__context__ == {"service.state": "enabled"}
def test_disabled():
"""
Test to verify that the service is disabled
"""
ret = {"changes": "saltstack", "comment": "", "name": "salt", "result": True}
mock = MagicMock(return_value={"changes": "saltstack"})
with patch.object(service, "_disable", mock):
assert service.disabled("salt") == ret
assert service.__context__ == {"service.state": "disabled"}
def test_mod_watch():
"""
Test the service watcher, called to invoke the watch command.
"""
ret = [
{
"changes": {},
"comment": "Service is already stopped",
"name": "salt",
"result": True,
},
{
"changes": {},
"comment": "Unable to trigger watch for service.stack",
"name": "salt",
"result": False,
},
{
"changes": {},
"comment": "Service is set to be started",
"name": "salt",
"result": None,
},
{
"changes": {"salt": "salt"},
"comment": "Service started",
"name": "salt",
"result": "salt",
},
]
mock = MagicMock(return_value=False)
with patch.dict(service.__salt__, {"service.status": mock}):
assert service.mod_watch("salt", "dead") == ret[0]
with patch.dict(service.__salt__, {"service.start": func}):
with patch.dict(service.__opts__, {"test": True}):
assert service.mod_watch("salt", "running") == ret[2]
with patch.dict(service.__opts__, {"test": False}):
assert service.mod_watch("salt", "running") == ret[3]
assert service.mod_watch("salt", "stack") == ret[1]
def test_mod_beacon(tmp_path):
"""
Test to create a beacon based on a service
"""
name = "sshd"
with patch.dict(service.__salt__, {"beacons.list": MagicMock(return_value={})}):
with patch.dict(service.__states__, {"beacon.present": beaconstate.present}):
ret = service.mod_beacon(name, sfun="copy")
expected = {
"name": name,
"changes": {},
"result": False,
"comment": "service.copy does not work with the beacon state function",
}
assert ret == expected
event_returns = [
{
"complete": True,
"tag": "/salt/minion/minion_beacons_list_complete",
"beacons": {},
},
{
"complete": True,
"tag": "/salt/minion/minion_beacons_list_complete",
"beacons": {},
},
{
"complete": True,
"tag": "/salt/minion/minion_beacons_list_available_complete",
"beacons": ["service"],
},
{
"valid": True,
"tag": "/salt/minion/minion_beacon_validation_complete",
"vcomment": "Valid beacon configuration",
},
{
"complete": True,
"tag": "/salt/minion/minion_beacon_add_complete",
"beacons": {
"beacon_service_sshd": [
{
"services": {
"sshd": {
"onchangeonly": True,
"delay": 0,
"uncleanshutdown": None,
"emitatstartup": False,
},
}
},
{"interval": 60},
{"beacon_module": "service"},
]
},
},
]
mock = MagicMock(return_value=True)
beacon_state_mocks = {
"beacons.list": beaconmod.list_,
"beacons.add": beaconmod.add,
"beacons.list_available": beaconmod.list_available,
"event.fire": mock,
}
beacon_mod_mocks = {"event.fire": mock}
sock_dir = str(tmp_path / "test-socks")
with patch.dict(service.__states__, {"beacon.present": beaconstate.present}):
with patch.dict(beaconstate.__salt__, beacon_state_mocks):
with patch.dict(beaconmod.__salt__, beacon_mod_mocks):
with patch.dict(
beaconmod.__opts__, {"beacons": {}, "sock_dir": sock_dir}
):
with patch.object(
SaltEvent, "get_event", side_effect=event_returns
):
ret = service.mod_beacon(name, sfun="running", beacon="True")
expected = {
"name": "beacon_service_sshd",
"changes": {},
"result": True,
"comment": "Adding beacon_service_sshd to beacons",
}
assert ret == expected
@pytest.mark.skip_on_darwin(reason="service.running is currently failing on OSX")
@pytest.mark.destructive_test
@pytest.mark.slow_test
def test_running_with_reload():
"""
Test that a running service is properly reloaded
"""
opts = salt.config.DEFAULT_MINION_OPTS.copy()
opts["grains"] = salt.loader.grains(opts)
utils = salt.loader.utils(opts)
modules = salt.loader.minion_mods(opts, utils=utils)
service_name = "cron"
cmd_name = "crontab"
os_family = opts["grains"]["os_family"]
os_release = opts["grains"]["osrelease"]
if os_family == "RedHat":
service_name = "crond"
elif os_family == "Arch":
service_name = "sshd"
cmd_name = "systemctl"
elif os_family == "MacOS":
service_name = "org.ntp.ntpd"
if int(os_release.split(".")[1]) >= 13:
service_name = "com.openssh.sshd"
elif os_family == "Windows":
service_name = "Spooler"
if os_family != "Windows" and salt.utils.path.which(cmd_name) is None:
pytest.skip("{} is not installed".format(cmd_name))
pre_srv_enabled = (
True if service_name in modules["service.get_enabled"]() else False
)
post_srv_disable = False
if not pre_srv_enabled:
modules["service.enable"](service_name)
post_srv_disable = True
try:
with patch.dict(service.__grains__, opts["grains"]), patch.dict(
service.__opts__, opts
), patch.dict(service.__salt__, modules), patch.dict(
service.__utils__, utils
), patch.dict(
service.__opts__, {"test": False}
), patch(
"salt.utils.systemd.offline", MagicMock(return_value=False)
):
service.dead(service_name, enable=False)
result = service.running(name=service_name, enable=True, reload=False)
if salt.utils.platform.is_windows():
comment = "Started service {}".format(service_name)
else:
comment = "Service {} has been enabled, and is running".format(service_name)
expected = {
"changes": {service_name: True},
"comment": comment,
"name": service_name,
"result": True,
}
assert result == expected
finally:
if post_srv_disable:
modules["service.disable"](service_name)
|
|
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabpolicy
version_added: "2.8"
short_description: Manage Azure Policy instance
description:
- Create, update and delete instance of Azure Policy.
options:
resource_group:
description:
- The name of the resource group.
required: True
lab_name:
description:
- The name of the lab.
required: True
policy_set_name:
description:
- The name of the policy set.
required: True
name:
description:
- The name of the policy.
required: True
description:
description:
- The description of the policy.
fact_name:
description:
- The fact name of the policy (e.g. C(lab_vm_count), C(lab_vm_size), MaxVmsAllowedPerLab, etc.).
choices:
- 'user_owned_lab_vm_count'
- 'user_owned_lab_premium_vm_count'
- 'lab_vm_count'
- 'lab_premium_vm_count'
- 'lab_vm_size'
- 'gallery_image'
- 'user_owned_lab_vm_count_in_subnet'
- 'lab_target_cost'
threshold:
description:
- The threshold of the policy (it could be either a maximum value or a list of allowed values).
type: raw
state:
description:
- Assert the state of the Policy.
- Use C(present) to create or update an Policy and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create DevTest Lab Policy
azure_rm_devtestlabpolicy:
resource_group: myResourceGroup
lab_name: myLab
policy_set_name: myPolicySet
name: myPolicy
fact_name: user_owned_lab_vm_count
threshold: 5
'''
RETURN = '''
id:
description:
- The identifier of the resource.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/policySets/
myPolicySet/policies/myPolicy"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMDtlPolicy(AzureRMModuleBase):
"""Configuration class for an Azure RM Policy resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
lab_name=dict(
type='str',
required=True
),
policy_set_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
description=dict(
type='str'
),
fact_name=dict(
type='str',
choices=['user_owned_lab_vm_count',
'user_owned_lab_premium_vm_count',
'lab_vm_count',
'lab_premium_vm_count',
'lab_vm_size',
'gallery_image',
'user_owned_lab_vm_count_in_subnet',
'lab_target_cost']
),
threshold=dict(
type='raw'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.lab_name = None
self.policy_set_name = None
self.name = None
self.policy = dict()
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.to_do = Actions.NoAction
required_if = [
('state', 'present', ['threshold', 'fact_name'])
]
super(AzureRMDtlPolicy, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True,
required_if=required_if)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
if hasattr(self, key):
setattr(self, key, kwargs[key])
elif kwargs[key] is not None:
self.policy[key] = kwargs[key]
if self.state == 'present':
self.policy['status'] = 'Enabled'
dict_camelize(self.policy, ['fact_name'], True)
if isinstance(self.policy['threshold'], list):
self.policy['evaluator_type'] = 'AllowedValuesPolicy'
else:
self.policy['evaluator_type'] = 'MaxValuePolicy'
response = None
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
base_url=self._cloud_environment.endpoints.resource_manager)
resource_group = self.get_resource_group(self.resource_group)
old_response = self.get_policy()
if not old_response:
self.log("Policy instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Policy instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
if (not default_compare(self.policy, old_response, '', self.results)):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Policy instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_policy()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Policy instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_policy()
# This currently doesn't work as there is a bug in the SDK / Service
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
else:
self.log("Policy instance unchanged")
self.results['changed'] = False
response = old_response
if self.state == 'present':
self.results.update({
'id': response.get('id', None),
'status': response.get('status', None)
})
return self.results
def create_update_policy(self):
'''
Creates or updates Policy with the specified configuration.
:return: deserialized Policy instance state dictionary
'''
self.log("Creating / Updating the Policy instance {0}".format(self.name))
try:
response = self.mgmt_client.policies.create_or_update(resource_group_name=self.resource_group,
lab_name=self.lab_name,
policy_set_name=self.policy_set_name,
name=self.name,
policy=self.policy)
if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Policy instance.')
self.fail("Error creating the Policy instance: {0}".format(str(exc)))
return response.as_dict()
def delete_policy(self):
'''
Deletes specified Policy instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the Policy instance {0}".format(self.name))
try:
response = self.mgmt_client.policies.delete(resource_group_name=self.resource_group,
lab_name=self.lab_name,
policy_set_name=self.policy_set_name,
name=self.name)
except CloudError as e:
self.log('Error attempting to delete the Policy instance.')
self.fail("Error deleting the Policy instance: {0}".format(str(e)))
return True
def get_policy(self):
'''
Gets the properties of the specified Policy.
:return: deserialized Policy instance state dictionary
'''
self.log("Checking if the Policy instance {0} is present".format(self.name))
found = False
try:
response = self.mgmt_client.policies.get(resource_group_name=self.resource_group,
lab_name=self.lab_name,
policy_set_name=self.policy_set_name,
name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Policy instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Policy instance.')
if found is True:
return response.as_dict()
return False
def default_compare(new, old, path, result):
if new is None:
return True
elif isinstance(new, dict):
if not isinstance(old, dict):
result['compare'] = 'changed [' + path + '] old dict is null'
return False
for k in new.keys():
if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
return False
return True
elif isinstance(new, list):
if not isinstance(old, list) or len(new) != len(old):
result['compare'] = 'changed [' + path + '] length is different or null'
return False
if isinstance(old[0], dict):
key = None
if 'id' in old[0] and 'id' in new[0]:
key = 'id'
elif 'name' in old[0] and 'name' in new[0]:
key = 'name'
else:
key = list(old[0])[0]
new = sorted(new, key=lambda x: x.get(key, None))
old = sorted(old, key=lambda x: x.get(key, None))
else:
new = sorted(new)
old = sorted(old)
for i in range(len(new)):
if not default_compare(new[i], old[i], path + '/*', result):
return False
return True
else:
if path == '/location':
new = new.replace(' ', '').lower()
old = old.replace(' ', '').lower()
if str(new) == str(old):
return True
else:
result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
return False
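# A minimal illustration (assumed, not from the generated module) of how
# default_compare is used above: the desired 'policy' dict is compared against
# the existing resource and only keys present in the new dict are checked.
#
#   >>> result = {}
#   >>> default_compare({'threshold': '5'}, {'threshold': '5', 'status': 'Enabled'}, '', result)
#   True
#   >>> default_compare({'threshold': '10'}, {'threshold': '5'}, '', result)
#   False    # result['compare'] becomes "changed [/threshold] 10 != 5"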
def dict_camelize(d, path, camelize_first):
if isinstance(d, list):
for i in range(len(d)):
dict_camelize(d[i], path, camelize_first)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = _snake_to_camel(old_value, camelize_first)
else:
sd = d.get(path[0], None)
if sd is not None:
dict_camelize(sd, path[1:], camelize_first)
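# Example (assumed, doctest-style) of the in-place camelization performed by
# dict_camelize via _snake_to_camel:
#
#   >>> d = {'fact_name': 'user_owned_lab_vm_count'}
#   >>> dict_camelize(d, ['fact_name'], True)
#   >>> d
#   {'fact_name': 'UserOwnedLabVmCount'}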
def dict_map(d, path, map):
if isinstance(d, list):
for i in range(len(d)):
dict_map(d[i], path, map)
elif isinstance(d, dict):
if len(path) == 1:
old_value = d.get(path[0], None)
if old_value is not None:
d[path[0]] = map.get(old_value, old_value)
else:
sd = d.get(path[0], None)
if sd is not None:
dict_map(sd, path[1:], map)
def main():
"""Main execution"""
AzureRMDtlPolicy()
if __name__ == '__main__':
main()
|
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Parameters module allows you to specify build parameters for a job.
**Component**: parameters
:Macro: parameter
:Entry Point: jenkins_jobs.parameters
Example::
job:
name: test_job
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import JenkinsJobsException
def base_param(parser, xml_parent, data, do_default, ptype):
pdef = XML.SubElement(xml_parent, ptype)
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
if do_default:
default = data.get('default', None)
if default:
XML.SubElement(pdef, 'defaultValue').text = default
else:
XML.SubElement(pdef, 'defaultValue')
return pdef
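# A minimal sketch (not part of the upstream module) of the XML base_param
# emits for a string parameter with a default value:
#
#   <hudson.model.StringParameterDefinition>
#     <name>FOO</name>
#     <description>A parameter named FOO, defaults to 'bar'.</description>
#     <defaultValue>bar</defaultValue>
#   </hudson.model.StringParameterDefinition>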
def string_param(parser, xml_parent, data):
"""yaml: string
A string parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.StringParameterDefinition')
def password_param(parser, xml_parent, data):
"""yaml: password
A password parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- password:
name: FOO
default: 1HSC0Ts6E161FysGf+e1xasgsHkgleLh09JUTYnipPvw=
description: "A parameter named FOO."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.PasswordParameterDefinition')
def bool_param(parser, xml_parent, data):
"""yaml: bool
A boolean parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- bool:
name: FOO
default: false
description: "A parameter named FOO, defaults to 'false'."
"""
data['default'] = str(data.get('default', False)).lower()
base_param(parser, xml_parent, data, True,
'hudson.model.BooleanParameterDefinition')
def file_param(parser, xml_parent, data):
"""yaml: file
A file parameter.
:arg str name: the target location for the file upload
:arg str description: a description of the parameter (optional)
Example::
parameters:
- file:
name: test.txt
description: "Upload test.txt."
"""
base_param(parser, xml_parent, data, False,
'hudson.model.FileParameterDefinition')
def text_param(parser, xml_parent, data):
"""yaml: text
A text parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- text:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
base_param(parser, xml_parent, data, True,
'hudson.model.TextParameterDefinition')
def label_param(parser, xml_parent, data):
"""yaml: label
A node label parameter.
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
Example::
parameters:
- label:
name: node
default: precise
description: "The node on which to run the job"
"""
base_param(parser, xml_parent, data, True,
'org.jvnet.jenkins.plugins.nodelabelparameter.'
'LabelParameterDefinition')
def choice_param(parser, xml_parent, data):
"""yaml: choice
A single selection parameter.
:arg str name: the name of the parameter
:arg list choices: the available choices
:arg str description: a description of the parameter (optional)
Example::
parameters:
- choice:
name: project
choices:
- nova
- glance
description: "On which project to run?"
"""
pdef = base_param(parser, xml_parent, data, False,
'hudson.model.ChoiceParameterDefinition')
choices = XML.SubElement(pdef, 'choices',
{'class': 'java.util.Arrays$ArrayList'})
a = XML.SubElement(choices, 'a', {'class': 'string-array'})
for choice in data['choices']:
XML.SubElement(a, 'string').text = choice
def run_param(parser, xml_parent, data):
"""yaml: run
A run parameter.
:arg str name: the name of the parameter
:arg str project-name: the name of job from which the user can pick runs
:arg str description: a description of the parameter (optional)
Example:
.. literalinclude:: /../../tests/parameters/fixtures/run-param001.yaml
:language: yaml
"""
pdef = base_param(parser, xml_parent, data, False,
'hudson.model.RunParameterDefinition')
XML.SubElement(pdef, 'projectName').text = data['project-name']
def extended_choice_param(parser, xml_parent, data):
"""yaml: extended-choice
Extended Choice Parameter
Requires the Jenkins `Extended Choice Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Extended+Choice+Parameter+plugin>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str type: parameter select type. Can be PT_SINGLE_SELECT,
PT_MULTI_SELECT, PT_RADIO, PT_CHECKBOX, PT_TEXTBOX
:arg str value: comma separated list of values
:arg str visible-item-count: number of lines to render for multi-select
(default 5)
:arg str multi-select-delimiter: value between selections when the
parameter is a multi-select (default ,)
:arg str default-value: default selected value
Example:
.. literalinclude:: \
/../../tests/parameters/fixtures/extended-choice-param001.yaml
:language: yaml
"""
pdef = XML.SubElement(xml_parent,
'com.cwctravel.hudson.plugins.'
'extended__choice__parameter.'
'ExtendedChoiceParameterDefinition')
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
types_list = ['PT_SINGLE_SELECT',
'PT_MULTI_SELECT',
'PT_RADIO',
'PT_CHECKBOX',
'PT_TEXTBOX']
type = data['type']
if type not in types_list:
raise JenkinsJobsException(
'extended-choice type must be one of: '
+ ', '.join(types_list))
else:
XML.SubElement(pdef, 'type').text = type
XML.SubElement(pdef, 'value').text = data.get('value', '')
XML.SubElement(pdef, 'visibleItemCount').text = data.get(
'visible-item-count', '5')
XML.SubElement(pdef, 'multiSelectDelimiter').text = data.get(
'multi-select-delimiter', ',')
XML.SubElement(pdef, 'quoteValue').text = 'false'
XML.SubElement(pdef, 'defaultValue').text = data.get(
'default-value', '')
def validating_string_param(parser, xml_parent, data):
"""yaml: validating-string
A validating string parameter
Requires the Jenkins `Validating String Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Validating+String+Parameter+Plugin>`_
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str regex: a regular expression to validate the string
:arg str msg: a message to display upon failed validation
Example::
parameters:
- validating-string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
regex: [A-Za-z]*
msg: Your entered value failed validation
"""
pdef = base_param(parser, xml_parent, data, True,
'hudson.plugins.validating__string__parameter.'
'ValidatingStringParameterDefinition')
XML.SubElement(pdef, 'regex').text = data['regex']
XML.SubElement(pdef, 'failedValidationMessage').text = data['msg']
def svn_tags_param(parser, xml_parent, data):
"""yaml: svn-tags
A svn tag parameter
Requires the Jenkins `Parameterized Trigger Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/
Parameterized+Trigger+Plugin>`_
:arg str name: the name of the parameter
:arg str default: the default value of the parameter (optional)
:arg str description: a description of the parameter (optional)
:arg str url: the url to list tags from
:arg str filter: the regular expression to filter tags
Example::
parameters:
- svn-tags:
name: BRANCH_NAME
default: release
description: A parameter named BRANCH_NAME default is release
url: http://svn.example.com/repo
filter: [A-za-z0-9]*
"""
pdef = base_param(parser, xml_parent, data, True,
'hudson.scm.listtagsparameter.'
'ListSubversionTagsParameterDefinition')
XML.SubElement(pdef, 'tagsDir').text = data['url']
XML.SubElement(pdef, 'tagsFilter').text = data.get('filter', None)
XML.SubElement(pdef, 'reverseByDate').text = "true"
XML.SubElement(pdef, 'reverseByName').text = "false"
XML.SubElement(pdef, 'maxTags').text = "100"
XML.SubElement(pdef, 'uuid').text = "1-1-1-1-1"
def dynamic_choice_param(parser, xml_parent, data):
"""yaml: dynamic-choice
Dynamic Choice Parameter
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices.
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-choice:
name: OPTIONS
description: "Available options"
script: "['optionA', 'optionB']"
remote: false
read-only: false
"""
dynamic_param_common(parser, xml_parent, data, 'ChoiceParameterDefinition')
def dynamic_string_param(parser, xml_parent, data):
"""yaml: dynamic-string
Dynamic Parameter
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script: Groovy expression which generates the potential choices
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg str classpath: class path for script (optional)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-string:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script: "bar"
remote: false
read-only: false
"""
dynamic_param_common(parser, xml_parent, data, 'StringParameterDefinition')
def dynamic_choice_scriptler_param(parser, xml_parent, data):
"""yaml: dynamic-choice-scriptler
Dynamic Choice Parameter (Scriptler)
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-choice-scriptler:
name: OPTIONS
description: "Available options"
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(parser, xml_parent, data,
'ScriptlerChoiceParameterDefinition')
def dynamic_string_scriptler_param(parser, xml_parent, data):
"""yaml: dynamic-string-scriptler
Dynamic Parameter (Scriptler)
Requires the Jenkins `Jenkins Dynamic Parameter Plug-in.
<https://wiki.jenkins-ci.org/display/JENKINS/
Jenkins+Dynamic+Parameter+Plug-in>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str script-id: Groovy script which generates the default value
:arg list parameters: parameters to corresponding script
:Parameter: * **name** (`str`) Parameter name
* **value** (`str`) Parameter value
:arg bool remote: the script will be executed on the slave where the build
is started (default false)
:arg bool read-only: user can't modify parameter once populated
(default false)
Example::
parameters:
- dynamic-string-scriptler:
name: FOO
description: "A parameter named FOO, defaults to 'bar'."
script-id: "scriptid.groovy"
parameters:
- name: param1
value: value1
- name: param2
value: value2
remote: false
read-only: false
"""
dynamic_scriptler_param_common(parser, xml_parent, data,
'ScriptlerStringParameterDefinition')
def dynamic_param_common(parser, xml_parent, data, ptype):
pdef = base_param(parser, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
+ ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__script').text = data.get('script', None)
localBaseDir = XML.SubElement(pdef, '__localBaseDirectory',
{'serialization': 'custom'})
filePath = XML.SubElement(localBaseDir, 'hudson.FilePath')
default = XML.SubElement(filePath, 'default')
XML.SubElement(filePath, 'boolean').text = "true"
XML.SubElement(default, 'remote').text = \
"/var/lib/jenkins/dynamic_parameter/classpath"
XML.SubElement(pdef, '__remoteBaseDirectory').text = \
"dynamic_parameter_classpath"
XML.SubElement(pdef, '__classPath').text = data.get('classpath', None)
XML.SubElement(pdef, 'readonlyInputField').text = str(
data.get('read-only', False)).lower()
def dynamic_scriptler_param_common(parser, xml_parent, data, ptype):
pdef = base_param(parser, xml_parent, data, False,
'com.seitenbau.jenkins.plugins.dynamicparameter.'
'scriptler.' + ptype)
XML.SubElement(pdef, '__remote').text = str(
data.get('remote', False)).lower()
XML.SubElement(pdef, '__scriptlerScriptId').text = data.get(
'script-id', None)
parametersXML = XML.SubElement(pdef, '__parameters')
parameters = data.get('parameters', [])
if parameters:
for parameter in parameters:
parameterXML = XML.SubElement(parametersXML,
'com.seitenbau.jenkins.plugins.'
'dynamicparameter.scriptler.'
'ScriptlerParameterDefinition_'
'-ScriptParameter')
XML.SubElement(parameterXML, 'name').text = parameter['name']
XML.SubElement(parameterXML, 'value').text = parameter['value']
XML.SubElement(pdef, 'readonlyInputField').text = str(data.get(
'read-only', False)).lower()
def matrix_combinations_param(parser, xml_parent, data):
"""yaml: matrix-combinations
Matrix combinations parameter
Requires the Jenkins `Matrix Combinations Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Matrix+Combinations+Plugin>`_
:arg str name: the name of the parameter
:arg str description: a description of the parameter (optional)
:arg str filter: Groovy expression to use filter the combination by
default (optional)
Example:
.. literalinclude:: \
/../../tests/parameters/fixtures/matrix-combinations-param001.yaml
:language: yaml
"""
element_name = 'hudson.plugins.matrix__configuration__parameter.' \
'MatrixCombinationsParameterDefinition'
pdef = XML.SubElement(xml_parent, element_name)
if 'name' not in data:
raise JenkinsJobsException('matrix-combinations must have a name '
'parameter.')
XML.SubElement(pdef, 'name').text = data['name']
XML.SubElement(pdef, 'description').text = data.get('description', '')
combination_filter = data.get('filter')
if combination_filter:
XML.SubElement(pdef, 'defaultCombinationFilter').text = \
combination_filter
return pdef
class Parameters(jenkins_jobs.modules.base.Base):
sequence = 21
component_type = 'parameter'
component_list_type = 'parameters'
def gen_xml(self, parser, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
parameters = data.get('parameters', [])
if parameters:
pdefp = XML.SubElement(properties,
'hudson.model.ParametersDefinitionProperty')
pdefs = XML.SubElement(pdefp, 'parameterDefinitions')
for param in parameters:
self.registry.dispatch('parameter',
parser, pdefs, param)
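# For reference, a sketch (assumed) of the property tree gen_xml builds when a
# job defines parameters; each dispatched parameter definition is appended
# under <parameterDefinitions>:
#
#   <properties>
#     <hudson.model.ParametersDefinitionProperty>
#       <parameterDefinitions>
#         <hudson.model.StringParameterDefinition>...</hudson.model.StringParameterDefinition>
#       </parameterDefinitions>
#     </hudson.model.ParametersDefinitionProperty>
#   </properties>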
|
|
"""Fabfile for the ``webfaction-django-boilerplate``.
Make sure to setup your ``fabric_settings.py`` first. As a start, just copy
``fabric_settings.py.sample``.
"""
from __future__ import with_statement
import os
from fabric.api import (
cd,
env,
lcd,
local,
run,
settings,
)
from fabric.contrib.files import append, contains, exists, sed
import fabric_settings as fab_settings
env.hosts = fab_settings.ENV_HOSTS
env.user = fab_settings.ENV_USER
BASHRC_SETTING1 = 'export VIRTUALENVWRAPPER_PYTHON=/usr/local/bin/python2.7'
BASHRC_SETTING2 = 'export WORKON_HOME=$HOME/Envs'
BASHRC_SETTING3 = 'source /home/{0}/bin/virtualenvwrapper.sh'.format(env.user)
BASHRC_SETTING4 = 'export PIP_VIRTUALENV_BASE=$WORKON_HOME'
BASHRC_SETTING5 = 'export PIP_RESPECT_VIRTUALENV=true'
PROJECT_NAME = fab_settings.PROJECT_NAME
FILE_SCRIPT_SETTINGS = 'script-settings-{0}.sh'.format(PROJECT_NAME)
FILE_DEPLOY_WEBSITE = 'deploy-website-{0}.sh'.format(PROJECT_NAME)
FILE_MYSQL_BACKUP = 'mysql-backup-{0}.sh'.format(PROJECT_NAME)
FILE_PG_BACKUP = 'pg-backup-{0}.sh'.format(PROJECT_NAME)
FILE_LOCALE_BACKUP = 'locale-backup-{0}.sh'.format(PROJECT_NAME)
FILE_RESTART_APACHE = 'restart-apache-{0}.sh'.format(PROJECT_NAME)
FILE_DJANGO_CLEANUP = 'django-cleanup-{0}.sh'.format(PROJECT_NAME)
FILE_CRONTAB = 'crontab-{0}.txt'.format(PROJECT_NAME)
FILE_SHOW_MEMORY = 'show-memory.sh'
FILE_PGPASS = '.pgpass-{0}'.format(PROJECT_NAME)
# ****************************************************************************
# HIGH LEVEL TASKS
# ****************************************************************************
def install_everything():
install_local_repo()
install_server()
local_link_repo_with_remote_repo()
first_deployment()
def first_deployment():
run_delete_previous_attempts()
run_create_virtualenv()
run_clone_repo()
run_install_scripts()
run_install_pgpass()
run_install_crontab()
run_delete_django()
run_install_requirements()
run_deploy_website(with_manage_py=False)
run_prepare_local_settings()
run_deploy_website()
run_loaddata_auth()
def install_local_repo():
local_create_new_repo()
local_init_django_project()
local_create_fab_settings()
local_initial_commit()
def install_server():
run_install_virtualenv()
run_install_mercurial()
run_add_bashrc_settings()
run_create_git_repo()
run_delete_index_files()
# ****************************************************************************
# LOCAL TASKS
# ****************************************************************************
def local_link_repo_with_remote_repo():
with lcd(fab_settings.PROJECT_ROOT):
local('git config http.sslVerify false')
local('git config http.postBuffer 524288000')
with settings(warn_only=True):
local('git remote rm origin')
local('git remote add origin'
' {0}@{0}.webfactional.com:'
'/home/{0}/webapps/git/repos/{1}'.format(
fab_settings.ENV_USER, fab_settings.GIT_REPO_NAME))
local('git push -u origin master')
def local_create_fab_settings():
fabfile_dir = os.path.join(fab_settings.PROJECT_ROOT, 'website',
'webapps', 'django', 'project', 'fabfile')
with lcd(fabfile_dir):
local('cp fab_settings.py.sample fab_settings.py')
local("sed -i -r -e 's/INSERT_PROJECT_NAME/{0}/g'"
" fab_settings.py".format(PROJECT_NAME))
local("sed -i -r -e 's/INSERT_ENV_USER/{0}/g'"
" fab_settings.py".format(fab_settings.ENV_USER))
def local_create_new_repo():
with lcd(fab_settings.PROJECT_ROOT):
local('rm -rf .git')
local('rm -f .gitmodules')
local('rm -rf website/webapps/django/project/submodules/Skeleton')
local('git init')
local('git submodule add git://github.com/dhgamache/Skeleton.git'
' website/webapps/django/project/submodules/Skeleton')
def local_init_django_project():
with lcd(fab_settings.DJANGO_PROJECT_ROOT):
# prepare local_settings.py
local('cp settings/local/local_settings.py.sample'
' settings/local/local_settings.py')
local("sed -i -r -e 's/MEDIA_APP_NAME/media/g'"
" settings/local/local_settings.py")
local("sed -i -r -e 's/STATIC_APP_NAME/static/g'"
" settings/local/local_settings.py")
local('cp fabfile/fab_settings.py.sample'
' fabfile/fab_settings.py')
# prepare gorun_settings.py
local('cp settings/local/gorun_settings.py.sample'
' gorun_settings.py')
# prepare urls.py
local("sed -i -r -e 's/XXXX/{0}/g' urls.py".format(
fab_settings.ADMIN_URL))
# initialize local Django project
local('python manage.py syncdb --all --noinput')
local('python manage.py migrate --fake')
local('python manage.py loaddata bootstrap_auth.json')
def local_initial_commit():
with lcd(fab_settings.PROJECT_ROOT):
local('git add .')
local('git commit -am "Initial commit."')
# ****************************************************************************
# REMOTE TASKS
# ****************************************************************************
def run_add_bashrc_settings():
with cd('$HOME'):
append('.bashrc', BASHRC_SETTING1, partial=True)
append('.bashrc', BASHRC_SETTING2, partial=True)
append('.bashrc', BASHRC_SETTING3, partial=True)
append('.bashrc', BASHRC_SETTING4, partial=True)
append('.bashrc', BASHRC_SETTING5, partial=True)
def run_clone_repo():
run('mkdir -p $HOME/src')
cloned_repo_path = '$HOME/src/{0}'.format(PROJECT_NAME)
if exists(cloned_repo_path):
run('rm -rf {0}'.format(cloned_repo_path))
with cd('$HOME/src'):
run('git clone $HOME/webapps/git/repos/{0} {1}'.format(
fab_settings.GIT_REPO_NAME, PROJECT_NAME))
with cd('$HOME/src/{0}'.format(PROJECT_NAME)):
run('git submodule init')
run('git submodule update')
def run_create_git_repo():
run('rm -rf $HOME/webapps/git/repos/{0}'.format(
fab_settings.GIT_REPO_NAME))
with cd('$HOME/webapps/git'):
run('git init --bare ./repos/{0}'.format(fab_settings.GIT_REPO_NAME))
with cd('$HOME/webapps/git/repos/{0}'.format(fab_settings.GIT_REPO_NAME)):
run('git config http.receivepack true')
def run_create_ssh_dir():
with cd('$HOME'):
with settings(warn_only=True):
run('mkdir .ssh')
run('touch .ssh/authorized_keys')
run('chmod 600 .ssh/authorized_keys')
run('chmod 700 .ssh')
def run_create_virtualenv():
with cd('$HOME'):
run('rm -rf $HOME/Envs/{0}'.format(fab_settings.VENV_NAME))
run('mkvirtualenv -p python2.7 --system-site-packages {0}'.format(
fab_settings.VENV_NAME))
def run_delete_index_files():
run('rm -f $HOME/webapps/{0}/index.html'.format(
fab_settings.MEDIA_APP_NAME))
run('rm -f $HOME/webapps/{0}/index.html'.format(
fab_settings.STATIC_APP_NAME))
def run_delete_previous_attempts():
run('rm -rf $HOME/webapps/{0}/project'.format(
fab_settings.DJANGO_APP_NAME))
run('rm -rf $HOME/Envs/{0}/'.format(fab_settings.VENV_NAME))
run('rm -rf $HOME/src/{0}/'.format(PROJECT_NAME))
run('rm -rf $HOME/bin/*{0}*.*'.format(PROJECT_NAME))
with cd('$HOME'):
run('touch .pgpass')
run("sed '/{0}/d' .pgpass > .pgpass_tmp".format(fab_settings.DB_NAME))
run('mv .pgpass_tmp .pgpass')
run('crontab -l > crontab_bak')
run("sed '/{0}.sh/d' crontab_bak > crontab_tmp".format(
fab_settings.PROJECT_NAME))
run('crontab crontab_tmp')
run('rm crontab_tmp')
def run_deploy_website(with_manage_py=True):
args = ' 1'
if with_manage_py:
args = ''
run('workon {0} && deploy-website-{1}.sh{2}'.format(fab_settings.VENV_NAME,
PROJECT_NAME, args))
def run_install_crontab():
run('mkdir -p $HOME/mylogs/cron/')
with cd('$HOME/bin/'):
run('crontab -l > crontab_tmp')
run('cat crontab-{0}.txt >> crontab_tmp'.format(
PROJECT_NAME))
run('crontab crontab_tmp')
run('rm crontab_tmp')
def run_install_mercurial():
with cd('$HOME'):
run('easy_install-2.7 mercurial')
def run_install_pgpass():
with cd('$HOME'):
run('touch .pgpass')
run('chmod 0600 .pgpass')
if not contains('.pgpass', fab_settings.DB_NAME):
run('cat {0} > .pgpass'.format(FILE_PGPASS))
run('rm {0}'.format(FILE_PGPASS))
def run_install_requirements():
run('workon {0} && pip install -r $HOME/src/{1}/website/webapps/django/'
'project/requirements.txt --upgrade'.format(
fab_settings.VENV_NAME, PROJECT_NAME))
def run_install_scripts():
with cd('$HOME/src/{0}/scripts'.format(PROJECT_NAME)):
run('git pull origin master')
run('cp deploy-website.sh $HOME/bin/{0}'.format(FILE_DEPLOY_WEBSITE))
run('cp mysql-backup.sh $HOME/bin/{0}'.format(FILE_MYSQL_BACKUP))
run('cp pg-backup.sh $HOME/bin/{0}'.format(FILE_PG_BACKUP))
run('cp locale-backup.sh $HOME/bin/{0}'.format(FILE_LOCALE_BACKUP))
run('cp restart-apache.sh $HOME/bin/{0}'.format(FILE_RESTART_APACHE))
run('cp django-cleanup.sh $HOME/bin/{0}'.format(FILE_DJANGO_CLEANUP))
run('cp script-settings.sh $HOME/bin/{0}'.format(FILE_SCRIPT_SETTINGS))
run('cp crontab.txt $HOME/bin/{0}'.format(FILE_CRONTAB))
run('cp {0} $HOME/bin/{0}'.format(FILE_SHOW_MEMORY))
# This one goes to $HOME
run('cp .pgpass $HOME/{0}'.format(FILE_PGPASS))
with cd('$HOME/bin'):
sed(FILE_SCRIPT_SETTINGS, 'INSERT_USERNAME', fab_settings.ENV_USER)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DB_USER', fab_settings.DB_USER)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DB_NAME', fab_settings.DB_NAME)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DB_PASSWORD',
fab_settings.DB_PASSWORD)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_PROJECT_NAME', PROJECT_NAME)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_DJANGO_APP_NAME',
fab_settings.DJANGO_APP_NAME)
sed(FILE_SCRIPT_SETTINGS, 'INSERT_VENV_NAME', fab_settings.VENV_NAME)
sed(FILE_DEPLOY_WEBSITE, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_MYSQL_BACKUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_PG_BACKUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_LOCALE_BACKUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_RESTART_APACHE, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_DJANGO_CLEANUP, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_CRONTAB, 'INSERT_PROJECTNAME', PROJECT_NAME)
sed(FILE_SHOW_MEMORY, 'INSERT_PROJECTNAME', PROJECT_NAME)
run('rm -f *.bak')
with cd('$HOME'):
sed(FILE_PGPASS, 'INSERT_DB_NAME', fab_settings.DB_NAME)
sed(FILE_PGPASS, 'INSERT_DB_USER', fab_settings.DB_USER)
sed(FILE_PGPASS, 'INSERT_DB_PASSWORD', fab_settings.DB_PASSWORD)
def run_install_virtualenv():
with cd('$HOME'):
run('mkdir -p $HOME/lib/python2.7')
run('easy_install-2.7 virtualenv')
run('easy_install-2.7 pip')
run('pip install virtualenvwrapper')
run('mkdir -p $HOME/Envs')
def run_loaddata_auth():
with cd('$HOME/webapps/{0}/project/'.format(
fab_settings.DJANGO_APP_NAME)):
run('workon {0} && ./manage.py loaddata bootstrap_auth.json'.format(
fab_settings.VENV_NAME))
def run_prepare_local_settings():
with cd('$HOME/webapps/{0}/project/settings/local'.format(
fab_settings.DJANGO_APP_NAME)):
run('cp local_settings.py.sample local_settings.py')
sed('local_settings.py', 'backends.sqlite3',
'backends.postgresql_psycopg2')
sed('local_settings.py', 'db.sqlite', fab_settings.DB_NAME)
sed('local_settings.py', '"USER": ""', '"USER": "{0}"'.format(
fab_settings.DB_USER))
sed('local_settings.py', '"PASSWORD": ""', '"PASSWORD": "{0}"'.format(
fab_settings.DB_PASSWORD))
sed('local_settings.py', 'yourproject', '{0}'.format(
PROJECT_NAME))
sed('local_settings.py', '##EMAIL_BACKEND', 'EMAIL_BACKEND')
sed('local_settings.py', 'FROM_EMAIL = "[email protected]"',
'FROM_EMAIL = "{0}"'.format(fab_settings.EMAIL_DEFAULT_FROM_EMAIL))
sed('local_settings.py', 'MAILER_EMAIL_BACKEND', '#MAILER_EMAIL_BACKEND') # NOQA
sed('local_settings.py', 'TEST_EMAIL_BACKEND_RECEPIENTS', '#TEST_EMAIL_BACKEND_RECEPIENTS') # NOQA
sed('local_settings.py', 'FROM_EMAIL =', '#FROM_EMAIL =')
sed('local_settings.py', '##FROM_EMAIL', 'FROM_EMAIL')
sed('local_settings.py', 'DEFAULT_#FROM_EMAIL', 'DEFAULT_FROM_EMAIL')
sed('local_settings.py', 'EMAIL_SUBJECT_PREFIX', '#EMAIL_SUBJECT_PREFIX') # NOQA
sed('local_settings.py', '##EMAIL_SUBJECT_PREFIX', 'EMAIL_SUBJECT_PREFIX') # NOQA
sed('local_settings.py', 'EMAIL_HOST =', '#EMAIL_HOST =')
sed('local_settings.py', '##EMAIL_HOST', 'EMAIL_HOST')
sed('local_settings.py', 'EMAIL_HOST_USER = FROM_EMAIL', '#EMAIL_HOST_USER = FROM_EMAIL') # NOQA
sed('local_settings.py', '#EMAIL_HOST_USER = ""',
'EMAIL_HOST_USER = "{0}"'.format(fab_settings.EMAIL_INBOX))
sed('local_settings.py', 'EMAIL_HOST_PASSWORD', '#EMAIL_HOST_PASSWORD')
sed('local_settings.py', '##EMAIL_HOST_PASSWORD = ""',
'EMAIL_HOST_PASSWORD = "{0}"'.format(fab_settings.EMAIL_PASSWORD))
sed('local_settings.py', 'EMAIL_PORT', '#EMAIL_PORT')
sed('local_settings.py', '##EMAIL_PORT', 'EMAIL_PORT')
sed('local_settings.py', 'MEDIA_APP_NAME', fab_settings.MEDIA_APP_NAME)
sed('local_settings.py', 'STATIC_APP_NAME',
fab_settings.STATIC_APP_NAME)
sed('local_settings.py', 'yourname', fab_settings.ADMIN_NAME)
sed('local_settings.py', '[email protected]', fab_settings.ADMIN_EMAIL)
run('rm -f *.bak')
def run_delete_django():
with cd('$HOME/webapps/{0}/lib/python2.7/'.format(
fab_settings.DJANGO_APP_NAME)):
run('rm -rf django')
run('rm -rf Django*')
|
|
# encoding: utf-8
# Broadcast protocol used to build a distributed cluster of solver processes.
#
# This code uses the twisted event driven networking framework:
# http://twistedmatrix.com/documents/13.0.0/core/howto/servers.html
#
import json
import logging
import threading
import time
import twisted
from twisted.internet.endpoints import clientFromString
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
__author__ = 'Jason Ansel'
log = logging.getLogger(__name__)
# Simple shared secret to identify our peers
PROTO_PASSWORD = 'ooLeel9aiJ4iW1nei1sa8Haichaig2ch'
class GlobalBest(object):
"""
Singleton class used to store the global best. Lock is required in
the worker processes as networking code runs in a different thread than
the solver.
"""
puzzle_id = None
score = None
solution = None
timestamp = time.time()
lock = threading.Lock()
@classmethod
def reset(cls, puzzle_id=None):
"""
Called when starting a new puzzle
:param puzzle_id: string identifying the puzzle being solved
"""
with cls.lock:
cls.puzzle_id = puzzle_id
cls.score = None
cls.solution = None
cls.timestamp = time.time()
@classmethod
def update(cls, puzzle_id, score, solution):
"""
Replace the current global best if score is lower than GlobalBest.score
:param puzzle_id: string identifying the puzzle being solved
:param score: number of squares required by solution
:param solution: packed permutation representation
:return: True if a new global best was established
"""
with cls.lock:
if puzzle_id != cls.puzzle_id:
log.warning('discarding results for wrong puzzle %s != %s',
puzzle_id, cls.puzzle_id)
elif cls.score is None or score < cls.score:
cls.score = score
cls.solution = solution
cls.timestamp = time.time()
return True
return False
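    # Example of the update semantics (illustrative, not part of the original
    # module; sol_a/sol_b/sol_c stand in for packed solutions): after
    # GlobalBest.reset('p1'), update('p1', 10, sol_a) returns True,
    # update('p1', 12, sol_b) returns False (worse score), and
    # update('p2', 5, sol_c) is discarded because the puzzle id no longer
    # matches.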
class GrideaProtocol(LineReceiver):
"""
Network protocol used to communicate problem instances and solutions
    to other processes. All messages are broadcast to the entire network and
consist of a JSON string on a single line. There exist two message types:
1) Problem instances, cause workers to start solving:
{'id': string, 'puzzle': [[...], ...], ...}
2) New global best solutions, sent by workers:
{'puzzle_id': string, 'score': int, 'solution': [...]}
"""
peers = set() # GrideaProtocol() instances
broadcast_lock = threading.Lock()
def __init__(self, worker=None, on_connect=None):
"""
:param worker: optional instance of gridea.GrideaWorker()
:param on_connect: optional callback for after connection
"""
self.worker = worker
self.on_connect = on_connect
self.logged_in = False
def connectionMade(self):
"""
Called by twisted framework on connect.
"""
self.transport.setTcpKeepAlive(True)
self.transport.setTcpNoDelay(True)
if isinstance(self.transport, twisted.internet.tcp.Client):
self.sendLine(PROTO_PASSWORD)
self.logged_in = True
GrideaProtocol.peers.add(self)
log.info('connect (%d peers)', len(GrideaProtocol.peers))
if self.on_connect:
self.on_connect()
def connectionLost(self, reason=None):
"""
Called by twisted framework on disconnect.
"""
GrideaProtocol.peers.discard(self)
log.info('disconnect (%d peers)', len(GrideaProtocol.peers))
if (isinstance(self.transport, twisted.internet.tcp.Client) and
reactor.running):
log.info('shutting down')
reactor.stop()
def lineReceived(self, line):
"""
Called by twisted framework from incoming network messages.
:param line: the line received from the network
"""
if not self.logged_in:
return self.login(line)
msg = json.loads(line)
if 'puzzle' in msg:
# Start solving a new puzzle instance
GlobalBest.reset(msg['id'])
if self.worker:
reactor.callInThread(self.worker.solve, msg)
self.broadcast(line)
log.debug('got new puzzle %s', msg['id'])
elif 'score' in msg:
# A new global best was found by other process
self.best(msg['puzzle_id'], msg['score'], msg['solution'])
def login(self, password):
"""
Called for any message sent by a client not logged in. We use a
simple shared secret auth to make sure we are talking to others who
speak the same protocol.
:param password: the message from the client
"""
if password == PROTO_PASSWORD:
self.logged_in = True
GrideaProtocol.peers.add(self)
log.info('login ok (%d peers)', len(GrideaProtocol.peers))
else:
self.transport.loseConnection()
log.info('login failed (%d peers)', len(GrideaProtocol.peers))
def broadcast(self, line):
"""
Broadcast line to all connected peers. Broadcast lock is only required
in worker processes as the solver will send from another thread.
:param line: the line to broadcast
"""
with GrideaProtocol.broadcast_lock:
for peer in GrideaProtocol.peers:
if peer is not self:
peer.sendLine(line)
def best(self, puzzle_id, score, solution):
"""
Record a new solution to the puzzle, and broadcast it to other
processes if it is a new global best.
:param puzzle_id: string identifying the puzzle being solved
:param score: number of squares required by solution
:param solution: packed permutation representation
"""
if GlobalBest.update(puzzle_id, score, solution):
self.broadcast(json.dumps({'puzzle_id': puzzle_id, 'score': score,
'solution': solution}))
def listen(port):
"""
Start a server using GrideaProtocol
:param port: port to listen on
"""
class ServerFactory(Factory):
protocol = GrideaProtocol
reactor.listenTCP(port, ServerFactory())
def connect(hostname, worker=None, on_connect=None):
"""
Connect to server using GrideaProtocol, automatically retry if it is
not yet running.
:param hostname: `hostname:port` to connect to
:param worker: optional gridea.GrideaWorker() to make this process a worker
:param on_connect: optional callback after connection
"""
class ClientFactory(ReconnectingClientFactory):
def buildProtocol(self, addr):
return GrideaProtocol(worker, on_connect)
clientFromString(reactor, 'tcp:' + hostname).connect(ClientFactory())
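# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; the hostname, port number and
# worker object below are assumptions for illustration). A hub process calls
# listen(), each worker calls connect() with its solver, and both then hand
# control to the twisted reactor:
#
#     listen(9123)                                    # hub process
#     connect('hub.example.com:9123', worker=solver)  # worker process
#     reactor.run()                                   # serve the protocol
#
# New puzzle messages received by any peer are re-broadcast to all others, and
# workers report improved solutions back through GrideaProtocol.best().
# ---------------------------------------------------------------------------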
|
|
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import sys
from pymatgen.analysis.defects.point_defects import *
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.bond_valence import BVAnalyzer
from monty.os.path import which
from pymatgen.io.cifio import CifParser
try:
import zeo
except ImportError:
zeo = None
gulp_present = which('gulp')
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class ValenceIonicRadiusEvaluatorTest(unittest.TestCase):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Vacancy
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0,0,0], [0.5,0.5,0], [0.5,0,0.5], [0,0.5,0.5],
[0.5,0,0], [0,0.5,0], [0,0,0.5], [0.5,0.5,0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
self._mgo_valrad_evaluator = ValenceIonicRadiusEvaluator(self._mgo_uc)
#self._si = Cssr.from_file("../../../../test_files/Si.cssr").structure
#self._ci_valrad_evaluator = ValenceIonicRadiusEvaluator(self._si)
def test_valences_ionic_structure(self):
valence_dict = self._mgo_valrad_evaluator.valences
for val in list(valence_dict.values()):
self.assertTrue(val in {2, -2})
def test_radii_ionic_structure(self):
radii_dict = self._mgo_valrad_evaluator.radii
for rad in list(radii_dict.values()):
self.assertTrue(rad in {0.86, 1.26})
class ValenceIonicRadiusEvaluatorMultiOxiTest(unittest.TestCase):
def setUp(self):
"""
Setup Fe3O4 structure for testing multiple oxidation states
"""
cif_ob = CifParser(os.path.join(test_dir, "Fe3O4.cif"))
self._struct = cif_ob.get_structures()[0]
self._valrad_evaluator = ValenceIonicRadiusEvaluator(self._struct)
self._length = len(self._struct.sites)
def test_valences_ionic_structure(self):
valence_set = set(self._valrad_evaluator.valences.values())
self.assertEqual(valence_set, {2,3,-2})
def test_radii_ionic_structure(self):
radii_set = set(self._valrad_evaluator.radii.values())
self.assertEqual(len(radii_set), 3)
self.assertEqual(radii_set, {0.72,0.75,1.26})
@unittest.skipIf(not zeo, "zeo not present.")
class VacancyTest(unittest.TestCase):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Vacancy
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
bv = BVAnalyzer()
self._mgo_uc = bv.get_oxi_state_decorated_structure(self._mgo_uc)
self._mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
self._mgo_val = self._mgo_val_rad_eval.valences
self._mgo_rad = self._mgo_val_rad_eval.radii
self._mgo_vac = Vacancy(self._mgo_uc, self._mgo_val, self._mgo_rad)
def test_defectsite_count(self):
self.assertTrue(self._mgo_vac.defectsite_count() == 2,
"Vacancy count wrong")
def test_enumerate_defectsites(self):
"""
The vacancy sites should be the lattice sites.
And there should be only two unique vacancy sites for MgO.
"""
uniq_sites = []
uniq_sites.append(self._mgo_uc.sites[3])
uniq_sites.append(self._mgo_uc.sites[7])
uniq_def_sites = self._mgo_vac.enumerate_defectsites()
        #Match uniq_sites with uniq_def_sites
#self.assertTrue(len(uniq_def_sites) == 2, "Vacancy init failed")
#mgo_spg = Spacegroup(int_number=225)
#self.assertTrue(mgo_spg.are_symmetrically_equivalent(uniq_sites,
# uniq_def_sites), "Vacancy init failed")
def test_get_defectsite_index(self):
for i in range(self._mgo_vac.defectsite_count()):
self.assertTrue(self._mgo_vac.get_defectsite_structure_index(i) <
len(self._mgo_uc.sites),
"Defect site index beyond range")
def test_gt_defectsite_coordination_number(self):
for i in range(self._mgo_vac.defectsite_count()):
self.assertTrue(
round(self._mgo_vac.get_defectsite_coordination_number(
i)) == 6.0, "Wrong coordination number")
def test_get_defectsite_coordinated_elements(self):
for i in range(self._mgo_vac.defectsite_count()):
site_index = self._mgo_vac.get_defectsite_structure_index(i)
site_el = self._mgo_uc[site_index].species_string
self.assertTrue(
site_el not in self._mgo_vac.get_coordinated_elements(
i), "Coordinated elements are wrong")
def test_get_defectsite_effective_charge(self):
for i in range(self._mgo_vac.defectsite_count()):
site_index = self._mgo_vac.get_defectsite_structure_index(i)
site_el = self._mgo_uc[site_index].species_and_occu
eff_charge = self._mgo_vac.get_defectsite_effective_charge(i)
if site_el["Mg2+"] == 1:
self.assertEqual(eff_charge, -2)
if site_el["O2-"] == 1:
self.assertEqual(eff_charge, 2)
def test_get_coordinatedsites_min_max_charge(self):
for i in range(self._mgo_vac.defectsite_count()):
min_chrg, max_chrg = self._mgo_vac.get_coordsites_min_max_charge(i)
self.assertEqual(min_chrg, max_chrg)
def test_make_supercells_with_defects(self):
scaling_matrix = [2,2,2]
vac_specie = ['Mg']
vac_scs = self._mgo_vac.make_supercells_with_defects(
scaling_matrix, vac_specie)
expected_structure_formulae = ["Mg32 O32", "Mg32 O31", "Mg31 O32"]
self.assertEqual(len(vac_scs),2)
for sc in vac_scs:
self.assertIn(sc.formula, expected_structure_formulae)
vac_scs = self._mgo_vac.make_supercells_with_defects(scaling_matrix)
expected_structure_formulae = ["Mg32 O32", "Mg32 O31", "Mg31 O32"]
self.assertEqual(len(vac_scs),3)
for sc in vac_scs:
self.assertIn(sc.formula, expected_structure_formulae)
vac_scs = self._mgo_vac.make_supercells_with_defects(
scaling_matrix,limit_return_structures=1)
expected_structure_formulae = ["Mg32 O32", "Mg32 O31", "Mg31 O32"]
self.assertEqual(len(vac_scs),2)
for sc in vac_scs:
self.assertIn(sc.formula, expected_structure_formulae)
@unittest.skip("deprecated")
def test_get_volume(self):
for i in range(self._mgo_vac.defectsite_count()):
vol = self._mgo_vac.get_volume(i)
#Once the zeo++ is properly working, make sure vol is +ve
self.assertIsInstance(vol, float)
@unittest.skip("deprecated")
def test_get_surface_area(self):
for i in range(self._mgo_vac.defectsite_count()):
sa = self._mgo_vac.get_surface_area(i)
            #Once the zeo++ is properly working, make sure sa is +ve
self.assertIsInstance(sa, float)
@unittest.skipIf(not gulp_present, "gulp not present.")
class VacancyFormationEnergyTest(unittest.TestCase):
def setUp(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
val = mgo_valrad_eval.valences
rad = mgo_valrad_eval.radii
self.mgo_vac = Vacancy(self.mgo_uc, val, rad)
self.mgo_vfe = VacancyFormationEnergy(self.mgo_vac)
def test_get_energy(self):
for i in range(len(self.mgo_vac.enumerate_defectsites())):
vfe = self.mgo_vfe.get_energy(i)
print(vfe)
self.assertIsInstance(vfe, float)
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialTest(unittest.TestCase):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Interstitial
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
self._mgo_val = mgo_val_rad_eval.valences
self._mgo_rad = mgo_val_rad_eval.radii
self._mgo_interstitial = Interstitial(
self._mgo_uc, self._mgo_val, self._mgo_rad
)
def test_enumerate_defectsites(self):
"""
The interstitial sites should be within the lattice
"""
uniq_def_sites = self._mgo_interstitial.enumerate_defectsites()
self.assertTrue(len(uniq_def_sites) == 2, "Interstitial init failed")
def test_defectsite_count(self):
print(self._mgo_interstitial.defectsite_count())
self.assertTrue(self._mgo_interstitial.defectsite_count() == 2,
"Vacancy count wrong")
def test_get_defectsite_coordination_number(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_defectsite_coordination_number(
i))
def test_get_coordinated_sites(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_coordinated_sites(
i))
def test_get_coordsites_charge_sum(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_coordsites_charge_sum(
i))
def test_get_defectsite_coordinated_elements(self):
struct_el = self._mgo_uc.composition.elements
for i in range(self._mgo_interstitial.defectsite_count()):
for el in self._mgo_interstitial.get_coordinated_elements(i):
self.assertTrue(
Element(el) in struct_el, "Coordinated elements are wrong"
)
def test_get_radius(self):
for i in range(self._mgo_interstitial.defectsite_count()):
rad = self._mgo_interstitial.get_radius(i)
print(rad)
self.assertTrue(rad, float)
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialVoronoiFaceCenterTest(unittest.TestCase):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Interstitial
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
self._mgo_val = mgo_val_rad_eval.valences
self._mgo_rad = mgo_val_rad_eval.radii
self._mgo_interstitial = Interstitial(
self._mgo_uc, self._mgo_val, self._mgo_rad,
site_type='voronoi_facecenter'
)
def test_enumerate_defectsites(self):
"""
The interstitial sites should be within the lattice
"""
uniq_def_sites = self._mgo_interstitial.enumerate_defectsites()
print("Length of voronoi face centers", len(uniq_def_sites))
self.assertTrue(len(uniq_def_sites) == 2, "Defect site count wrong")
def test_defectsite_count(self):
print(self._mgo_interstitial.defectsite_count())
self.assertTrue(self._mgo_interstitial.defectsite_count() == 2,
"Vacancy count wrong")
def test_get_defectsite_coordination_number(self):
for i in range(self._mgo_interstitial.defectsite_count()):
coord_no=self._mgo_interstitial.get_defectsite_coordination_number(
i)
self.assertTrue(isinstance(coord_no, float))
def test_get_coordinated_sites(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_coordinated_sites(
i))
def test_get_coordsites_charge_sum(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_coordsites_charge_sum(
i))
def test_get_defectsite_coordinated_elements(self):
struct_el = self._mgo_uc.composition.elements
for i in range(self._mgo_interstitial.defectsite_count()):
for el in self._mgo_interstitial.get_coordinated_elements(i):
self.assertTrue(
Element(el) in struct_el, "Coordinated elements are wrong"
)
def test_get_radius(self):
for i in range(self._mgo_interstitial.defectsite_count()):
rad = self._mgo_interstitial.get_radius(i)
self.assertAlmostEqual(rad,0.0)
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialHighAccuracyTest(unittest.TestCase):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Interstitial
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
self._mgo_val = mgo_val_rad_eval.valences
self._mgo_rad = mgo_val_rad_eval.radii
self._mgo_interstitial = Interstitial(
self._mgo_uc, self._mgo_val, self._mgo_rad, accuracy='High'
)
def test_enumerate_defectsites(self):
"""
The interstitial sites should be within the lattice
"""
uniq_def_sites = self._mgo_interstitial.enumerate_defectsites()
for site in uniq_def_sites:
self.assertIsInstance(site, PeriodicSite, "Returned objects are not sites")
#print len(uniq_def_sites)
#self.assertTrue(len(uniq_def_sites) == 2, "Interstitial init failed")
def test_defectsite_count(self):
self.assertIsNotNone(self._mgo_interstitial.defectsite_count(),
"Interstitial count wrong")
def test_get_defectsite_coordination_number(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_defectsite_coordination_number(
i))
def test_get_coordinated_sites(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_coordinated_sites(
i))
def test_get_coordsites_charge_sum(self):
for i in range(self._mgo_interstitial.defectsite_count()):
print(self._mgo_interstitial.get_coordsites_charge_sum(
i))
def test_get_defectsite_coordinated_elements(self):
struct_el = self._mgo_uc.composition.elements
for i in range(self._mgo_interstitial.defectsite_count()):
for el in self._mgo_interstitial.get_coordinated_elements(i):
self.assertTrue(
Element(el) in struct_el, "Coordinated elements are wrong"
)
def test_get_radius(self):
for i in range(self._mgo_interstitial.defectsite_count()):
rad = self._mgo_interstitial.get_radius(i)
print(rad)
self.assertTrue(rad, float)
"""
Some of the tests are nearly useless. Better tests are needed
"""
@unittest.skipIf(not (gulp_present and zeo), "gulp or zeo not present.")
class InterstitialAnalyzerTest(unittest.TestCase):
def setUp(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
val = mgo_valrad_eval.valences
rad = mgo_valrad_eval.radii
self.mgo_val = val
self.mgo_rad = rad
self.mgo_inter = Interstitial(self.mgo_uc, val, rad)
self.mgo_ia = InterstitialAnalyzer(self.mgo_inter, 'Mg', 2)
def test_get_relaxedenergy(self):
for i in range(len(self.mgo_inter.enumerate_defectsites())):
ife = self.mgo_ia.get_energy(i, True)
site_coords = self.mgo_inter.get_defectsite(i).coords
site_radius = self.mgo_inter.get_radius(i)
#print(i, site_coords, site_radius, ife)
self.assertIsInstance(ife, float)
def test_get_norelaxedenergy(self):
for i in range(self.mgo_inter.defectsite_count()):
ife = self.mgo_ia.get_energy(i, False)
site_coords = self.mgo_inter.get_defectsite(i).coords
site_radius = self.mgo_inter.get_radius(i)
print(i, site_coords, site_radius, ife)
self.assertIsInstance(ife, float)
def test_get_percentage_volume_change(self):
for i in range(self.mgo_inter.defectsite_count()):
del_vol = self.mgo_ia.get_percentage_volume_change(i)
#print(i, del_vol)
def test_get_percentage_lattice_parameter_change(self):
for i in range(self.mgo_inter.defectsite_count()):
del_lat = self.mgo_ia.get_percentage_lattice_parameter_change(i)
#print(i, del_lat)
def test_get_percentage_bond_distance_change(self):
for i in range(self.mgo_inter.defectsite_count()):
del_bd = self.mgo_ia.get_percentage_bond_distance_change(i)
#print(i, del_bd)
def test_relaxed_structure_match(self):
for i in range(self.mgo_inter.defectsite_count()):
for j in range(self.mgo_inter.defectsite_count()):
match = self.mgo_ia.relaxed_structure_match(i, j)
#print(i, j, match)
if i == j:
self.assertTrue(match)
@unittest.skipIf(not (gulp_present and zeo), "gulp or zeo not present.")
class InterstitialStructureRelaxerTest(unittest.TestCase):
def setUp(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
val = mgo_valrad_eval.valences
rad = mgo_valrad_eval.radii
self.mgo_val = val
self.mgo_rad = rad
self.mgo_inter = Interstitial(self.mgo_uc, val, rad)
self.isr = InterstitialStructureRelaxer(self.mgo_inter, 'Mg', 2)
def test_relaxed_structure_match(self):
for i in range(self.mgo_inter.defectsite_count()):
for j in range(self.mgo_inter.defectsite_count()):
match = self.isr.relaxed_structure_match(i, j)
#print i, j, match
if i == j:
self.assertTrue(match)
def test_relaxed_energy_match(self):
for i in range(self.mgo_inter.defectsite_count()):
for j in range(self.mgo_inter.defectsite_count()):
match = self.isr.relaxed_energy_match(i, j)
#print i, j, match
if i == j:
self.assertTrue(match)
def test_get_relaxed_structure(self):
for i in range(self.mgo_inter.defectsite_count()):
relax_struct = self.isr.get_relaxed_structure(i)
self.assertIsInstance(relax_struct, Structure)
def test_get_relaxed_energy(self):
for i in range(self.mgo_inter.defectsite_count()):
energy = self.isr.get_relaxed_energy(i)
self.assertIsInstance(energy, float)
def test_get_relaxed_interstitial(self):
ri = self.isr.get_relaxed_interstitial()
self.assertIsInstance(ri, RelaxedInterstitial)
@unittest.skipIf(not (gulp_present and zeo), "gulp or zeo not present.")
class RelaxedInterstitialTest(unittest.TestCase):
def setUp(self):
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
val = mgo_valrad_eval.valences
rad = mgo_valrad_eval.radii
self.mgo_val = val
self.mgo_rad = rad
self.mgo_inter = Interstitial(self.mgo_uc, val, rad)
isr = InterstitialStructureRelaxer(self.mgo_inter, 'Mg', 2)
self.ri = isr.get_relaxed_interstitial()
def test_formation_energy(self):
for i in range(self.mgo_inter.defectsite_count()):
ife = self.ri.formation_energy(i)
self.assertIsInstance(ife, float)
print("ife", ife)
def test_get_percentage_volume_change(self):
for i in range(self.mgo_inter.defectsite_count()):
del_vol = self.ri.get_percentage_volume_change(i)
self.assertIsInstance(del_vol, float)
print("del_vol", del_vol)
def test_get_percentage_lattice_parameter_change(self):
for i in range(self.mgo_inter.defectsite_count()):
del_lat = self.ri.get_percentage_lattice_parameter_change(i)
self.assertNotEqual(del_lat['a'], 0)
self.assertNotEqual(del_lat['b'], 0)
self.assertNotEqual(del_lat['c'], 0)
print("del_lat", del_lat)
def test_get_percentage_bond_distance_change(self):
for i in range(self.mgo_inter.defectsite_count()):
del_bd = self.ri.get_percentage_bond_distance_change(i)
#self.assertIsInstance(del_bd, float)
#print del_bd
if __name__ == "__main__":
unittest.main()
#suite = unittest.TestLoader().loadTestsFromTestCase(ValenceIonicRadiusEvaluatorTest)
#suite = unittest.TestLoader().loadTestsFromTestCase(InterstitialTest)
#suite = unittest.TestLoader().loadTestsFromTestCase(VacancyTest)
#suite = unittest.TestLoader().loadTestsFromTestCase(VacancyFormationEnergyTest)
#suite = unittest.TestLoader().loadTestsFromTestCase(InterstitialAnalyzerTest)
#unittest.TextTestRunner(verbosity=3).run(suite)
|
|
import base64
import struct
from typing import Any, Dict, List, Optional
# This file is adapted from samples/shellinabox/ssh-krb-wrapper in
# https://github.com/davidben/webathena, which has the following
# license:
#
# Copyright (c) 2013 David Benjamin and Alan Huang
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as
# there is already an ASN.1 implementation, but in the interest of
# limiting MIT Kerberos's exposure to malformed ccaches, encode it
# ourselves. To that end, here's the laziest DER encoder ever.
def der_encode_length(length: int) -> bytes:
if length <= 127:
return struct.pack("!B", length)
out = b""
while length > 0:
out = struct.pack("!B", length & 0xFF) + out
length >>= 8
out = struct.pack("!B", len(out) | 0x80) + out
return out
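# Worked examples for the length encoding above (illustrative, not part of the
# original module): short-form lengths fit in a single byte, long-form lengths
# get a count-of-length-bytes prefix with the high bit set.
#   der_encode_length(127) == b"\x7f"
#   der_encode_length(300) == b"\x82\x01\x2c"   # 0x82: two length bytes follow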
def der_encode_tlv(tag: int, value: bytes) -> bytes:
return struct.pack("!B", tag) + der_encode_length(len(value)) + value
def der_encode_integer_value(val: int) -> bytes:
if not isinstance(val, int):
raise TypeError("int")
# base 256, MSB first, two's complement, minimum number of octets
# necessary. This has a number of annoying edge cases:
# * 0 and -1 are 0x00 and 0xFF, not the empty string.
# * 255 is 0x00 0xFF, not 0xFF
# * -256 is 0xFF 0x00, not 0x00
# Special-case to avoid an empty encoding.
if val == 0:
return b"\x00"
sign = 0 # What you would get if you sign-extended the current high bit.
out = b""
# We can stop once sign-extension matches the remaining value.
while val != sign:
byte = val & 0xFF
out = struct.pack("!B", byte) + out
sign = -1 if byte & 0x80 == 0x80 else 0
val >>= 8
return out
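# Worked examples of the edge cases listed above (illustrative, not part of
# the original module):
#   der_encode_integer_value(0)    == b"\x00"
#   der_encode_integer_value(-1)   == b"\xff"
#   der_encode_integer_value(255)  == b"\x00\xff"   # leading 0x00 keeps it positive
#   der_encode_integer_value(-256) == b"\xff\x00"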
def der_encode_integer(val: int) -> bytes:
return der_encode_tlv(0x02, der_encode_integer_value(val))
def der_encode_int32(val: int) -> bytes:
if val < -2147483648 or val > 2147483647:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_uint32(val: int) -> bytes:
if val < 0 or val > 4294967295:
raise ValueError("Bad value")
return der_encode_integer(val)
def der_encode_string(val: str) -> bytes:
if not isinstance(val, str):
raise TypeError("unicode")
return der_encode_tlv(0x1B, val.encode())
def der_encode_octet_string(val: bytes) -> bytes:
if not isinstance(val, bytes):
raise TypeError("bytes")
return der_encode_tlv(0x04, val)
def der_encode_sequence(tlvs: List[Optional[bytes]], tagged: bool = True) -> bytes:
body = []
for i, tlv in enumerate(tlvs):
# Missing optional elements represented as None.
if tlv is None:
continue
if tagged:
# Assume kerberos-style explicit tagging of components.
tlv = der_encode_tlv(0xA0 | i, tlv)
body.append(tlv)
return der_encode_tlv(0x30, b"".join(body))
def der_encode_ticket(tkt: Dict[str, Any]) -> bytes:
return der_encode_tlv(
0x61, # Ticket
der_encode_sequence(
[
der_encode_integer(5), # tktVno
der_encode_string(tkt["realm"]),
der_encode_sequence( # PrincipalName
[
der_encode_int32(tkt["sname"]["nameType"]),
der_encode_sequence(
[der_encode_string(c) for c in tkt["sname"]["nameString"]], tagged=False
),
]
),
der_encode_sequence( # EncryptedData
[
der_encode_int32(tkt["encPart"]["etype"]),
(
der_encode_uint32(tkt["encPart"]["kvno"])
if "kvno" in tkt["encPart"]
else None
),
der_encode_octet_string(base64.b64decode(tkt["encPart"]["cipher"])),
]
),
]
),
)
# Kerberos ccache writing code. Using format documentation from here:
# https://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
def ccache_counted_octet_string(data: bytes) -> bytes:
if not isinstance(data, bytes):
raise TypeError("bytes")
return struct.pack("!I", len(data)) + data
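# Example (illustrative, not part of the original module): each counted octet
# string is prefixed with its big-endian 32-bit length, so
#   ccache_counted_octet_string(b"hi") == b"\x00\x00\x00\x02hi"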
def ccache_principal(name: Dict[str, str], realm: str) -> bytes:
header = struct.pack("!II", name["nameType"], len(name["nameString"]))
return (
header
+ ccache_counted_octet_string(realm.encode())
+ b"".join(ccache_counted_octet_string(c.encode()) for c in name["nameString"])
)
def ccache_key(key: Dict[str, str]) -> bytes:
return struct.pack("!H", key["keytype"]) + ccache_counted_octet_string(
base64.b64decode(key["keyvalue"])
)
def flags_to_uint32(flags: List[str]) -> int:
ret = 0
for i, v in enumerate(flags):
if v:
ret |= 1 << (31 - i)
return ret
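# Example (illustrative, not part of the original module): flag index 0 maps to
# the most significant bit of the 32-bit word, so
#   flags_to_uint32([True] + [False] * 31) == 0x80000000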
def ccache_credential(cred: Dict[str, Any]) -> bytes:
out = ccache_principal(cred["cname"], cred["crealm"])
out += ccache_principal(cred["sname"], cred["srealm"])
out += ccache_key(cred["key"])
out += struct.pack(
"!IIII",
cred["authtime"] // 1000,
cred.get("starttime", cred["authtime"]) // 1000,
cred["endtime"] // 1000,
cred.get("renewTill", 0) // 1000,
)
out += struct.pack("!B", 0)
out += struct.pack("!I", flags_to_uint32(cred["flags"]))
# TODO: Care about addrs or authdata? Former is "caddr" key.
out += struct.pack("!II", 0, 0)
out += ccache_counted_octet_string(der_encode_ticket(cred["ticket"]))
# No second_ticket.
out += ccache_counted_octet_string(b"")
return out
def make_ccache(cred: Dict[str, Any]) -> bytes:
# Do we need a DeltaTime header? The ccache I get just puts zero
# in there, so do the same.
out = struct.pack(
"!HHHHII",
0x0504, # file_format_version
12, # headerlen
1, # tag (DeltaTime)
8, # taglen (two uint32_ts)
0,
0, # time_offset / usec_offset
)
out += ccache_principal(cred["cname"], cred["crealm"])
out += ccache_credential(cred)
return out
|
|
import asyncio
import warnings
from typing import Dict, Optional, Tuple
from mitmproxy import command, controller, ctx, exceptions, flow, http, log, master, options, platform, tcp, websocket
from mitmproxy.flow import Error, Flow
from mitmproxy.proxy import commands, events, server_hooks
from mitmproxy.proxy import server
from mitmproxy.proxy.layers.tcp import TcpMessageInjected
from mitmproxy.proxy.layers.websocket import WebSocketMessageInjected
from mitmproxy.utils import asyncio_utils, human
from wsproto.frame_protocol import Opcode
class AsyncReply(controller.Reply):
"""
controller.Reply.q.get() is blocking, which we definitely want to avoid in a coroutine.
This stub adds a .done asyncio.Event() that can be used instead.
"""
def __init__(self, *args):
self.done = asyncio.Event()
self.loop = asyncio.get_event_loop()
super().__init__(*args)
def commit(self):
super().commit()
try:
self.loop.call_soon_threadsafe(lambda: self.done.set())
except RuntimeError: # pragma: no cover
pass # event loop may already be closed.
def kill(self, force=False): # pragma: no cover
warnings.warn("reply.kill() is deprecated, set the error attribute instead.", DeprecationWarning, stacklevel=2)
self.obj.error = flow.Error(Error.KILLED_MESSAGE)
class ProxyConnectionHandler(server.StreamConnectionHandler):
master: master.Master
def __init__(self, master, r, w, options):
self.master = master
super().__init__(r, w, options)
self.log_prefix = f"{human.format_address(self.client.peername)}: "
async def handle_hook(self, hook: commands.StartHook) -> None:
with self.timeout_watchdog.disarm():
# We currently only support single-argument hooks.
data, = hook.args()
data.reply = AsyncReply(data)
await self.master.addons.handle_lifecycle(hook)
await data.reply.done.wait()
data.reply = None
def log(self, message: str, level: str = "info") -> None:
x = log.LogEntry(self.log_prefix + message, level)
x.reply = controller.DummyReply() # type: ignore
asyncio_utils.create_task(
self.master.addons.handle_lifecycle(log.AddLogHook(x)),
name="ProxyConnectionHandler.log"
)
class Proxyserver:
"""
This addon runs the actual proxy server.
"""
server: Optional[asyncio.AbstractServer]
listen_port: int
master: master.Master
options: options.Options
is_running: bool
_connections: Dict[Tuple, ProxyConnectionHandler]
def __init__(self):
self._lock = asyncio.Lock()
self.server = None
self.is_running = False
self._connections = {}
def __repr__(self):
return f"ProxyServer({'running' if self.server else 'stopped'}, {len(self._connections)} active conns)"
def load(self, loader):
loader.add_option(
"connection_strategy", str, "eager",
"Determine when server connections should be established. When set to lazy, mitmproxy "
"tries to defer establishing an upstream connection as long as possible. This makes it possible to "
"use server replay while being offline. When set to eager, mitmproxy can detect protocols with "
"server-side greetings, as well as accurately mirror TLS ALPN negotiation.",
choices=("eager", "lazy")
)
loader.add_option(
"stream_large_bodies", Optional[str], None,
"""
Stream data to the client if response body exceeds the given
threshold. If streamed, the body will not be stored in any way.
Understands k/m/g suffixes, i.e. 3m for 3 megabytes.
"""
)
loader.add_option(
"body_size_limit", Optional[str], None,
"""
Byte size limit of HTTP request and response bodies. Understands
k/m/g suffixes, i.e. 3m for 3 megabytes.
"""
)
loader.add_option(
"keep_host_header", bool, False,
"""
Reverse Proxy: Keep the original host header instead of rewriting it
to the reverse proxy target.
"""
)
loader.add_option(
"proxy_debug", bool, False,
"Enable debug logs in the proxy core.",
)
def running(self):
self.master = ctx.master
self.options = ctx.options
self.is_running = True
self.configure(["listen_port"])
def configure(self, updated):
if "stream_large_bodies" in updated:
try:
human.parse_size(ctx.options.stream_large_bodies)
except ValueError:
raise exceptions.OptionsError(f"Invalid stream_large_bodies specification: "
f"{ctx.options.stream_large_bodies}")
if "body_size_limit" in updated:
try:
human.parse_size(ctx.options.body_size_limit)
except ValueError:
raise exceptions.OptionsError(f"Invalid body_size_limit specification: "
f"{ctx.options.body_size_limit}")
if not self.is_running:
return
if "mode" in updated and ctx.options.mode == "transparent": # pragma: no cover
platform.init_transparent_mode()
if any(x in updated for x in ["server", "listen_host", "listen_port"]):
asyncio.create_task(self.refresh_server())
async def refresh_server(self):
async with self._lock:
if self.server:
await self.shutdown_server()
self.server = None
if ctx.options.server:
if not ctx.master.addons.get("nextlayer"):
ctx.log.warn("Warning: Running proxyserver without nextlayer addon!")
self.server = await asyncio.start_server(
self.handle_connection,
self.options.listen_host,
self.options.listen_port,
)
addrs = {f"http://{human.format_address(s.getsockname())}" for s in self.server.sockets}
ctx.log.info(f"Proxy server listening at {' and '.join(addrs)}")
async def shutdown_server(self):
ctx.log.info("Stopping server...")
self.server.close()
await self.server.wait_closed()
self.server = None
async def handle_connection(self, r, w):
peername = w.get_extra_info('peername')
asyncio_utils.set_task_debug_info(
asyncio.current_task(),
name=f"Proxyserver.handle_connection",
client=peername,
)
handler = ProxyConnectionHandler(
self.master,
r,
w,
self.options
)
self._connections[peername] = handler
try:
await handler.handle_client()
finally:
del self._connections[peername]
def inject_event(self, event: events.MessageInjected):
if event.flow.client_conn.peername not in self._connections:
raise ValueError("Flow is not from a live connection.")
self._connections[event.flow.client_conn.peername].server_event(event)
@command.command("inject.websocket")
def inject_websocket(self, flow: Flow, to_client: bool, message: bytes, is_text: bool = True):
if not isinstance(flow, http.HTTPFlow) or not flow.websocket:
ctx.log.warn("Cannot inject WebSocket messages into non-WebSocket flows.")
msg = websocket.WebSocketMessage(
Opcode.TEXT if is_text else Opcode.BINARY,
not to_client,
message
)
event = WebSocketMessageInjected(flow, msg)
try:
self.inject_event(event)
except ValueError as e:
ctx.log.warn(str(e))
@command.command("inject.tcp")
def inject_tcp(self, flow: Flow, to_client: bool, message: bytes):
if not isinstance(flow, tcp.TCPFlow):
ctx.log.warn("Cannot inject TCP messages into non-TCP flows.")
event = TcpMessageInjected(flow, tcp.TCPMessage(not to_client, message))
try:
self.inject_event(event)
except ValueError as e:
ctx.log.warn(str(e))
def server_connect(self, ctx: server_hooks.ServerConnectionHookData):
assert ctx.server.address
self_connect = (
ctx.server.address[1] == self.options.listen_port
and
ctx.server.address[0] in ("localhost", "127.0.0.1", "::1", self.options.listen_host)
)
if self_connect:
ctx.server.error = "Stopped mitmproxy from recursively connecting to itself."
|
|
"""
hash_table.py
Python implementation of the very simple, fixed-array hash table
used for the audfprint fingerprinter.
2014-05-25 Dan Ellis [email protected]
"""
from __future__ import print_function
import numpy as np
import random
import cPickle as pickle
import os, gzip
import scipy.io
import math
# Current format version
HT_VERSION = 20140920
# Earliest acceptable version
HT_COMPAT_VERSION = 20140920
def _bitsfor(maxval):
""" Convert a maxval into a number of bits (left shift).
Raises a ValueError if the maxval is not a power of 2. """
maxvalbits = int(round(math.log(maxval)/math.log(2)))
if maxval != (1 << maxvalbits):
raise ValueError("maxval must be a power of 2, not %d" % maxval)
return maxvalbits
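# Examples for _bitsfor (illustrative, not part of the original module):
#   _bitsfor(16384) == 14     # the default maxtime used by HashTable below
#   _bitsfor(1000)            # raises ValueError: 1000 is not a power of 2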
class HashTable(object):
"""
Simple hash table for storing and retrieving fingerprint hashes.
:usage:
>>> ht = HashTable(size=2**10, depth=100)
>>> ht.store('identifier', list_of_landmark_time_hash_pairs)
>>> list_of_ids_tracks = ht.get_hits(hash)
"""
def __init__(self, filename=None, hashbits=20, depth=100, maxtime=16384):
""" allocate an empty hash table of the specified size """
if filename is not None:
self.params = self.load(filename)
else:
self.hashbits = hashbits
self.depth = depth
self.maxtimebits = _bitsfor(maxtime)
# allocate the big table
size = 2**hashbits
self.table = np.zeros((size, depth), dtype=np.uint32)
# keep track of number of entries in each list
self.counts = np.zeros(size, dtype=np.int32)
# map names to IDs
self.names = []
# track number of hashes stored per id
self.hashesperid = np.zeros(0, np.uint32)
# Empty params
self.params = {}
# Record the current version
self.ht_version = HT_VERSION
# Mark as unsaved
self.dirty = True
def reset(self):
""" Reset to empty state (but preserve parameters) """
self.table[:,:] = 0
self.counts[:] = 0
self.names = []
self.hashesperid.resize(0)
self.dirty = True
def store(self, name, timehashpairs):
""" Store a list of hashes in the hash table
associated with a particular name (or integer ID) and time.
"""
id_ = self.name_to_id(name, add_if_missing=True)
# Now insert the hashes
hashmask = (1 << self.hashbits) - 1
#mxtime = self.maxtime
maxtime = 1 << self.maxtimebits
timemask = maxtime - 1
# Try sorting the pairs by hash value, for better locality in storing
#sortedpairs = sorted(timehashpairs, key=lambda x:x[1])
sortedpairs = timehashpairs
# Tried making it an np array to permit vectorization, but slower...
#sortedpairs = np.array(sorted(timehashpairs, key=lambda x:x[1]),
# dtype=int)
# Keep only the bottom part of the time value
#sortedpairs[:,0] = sortedpairs[:,0] % self.maxtime
# Keep only the bottom part of the hash value
#sortedpairs[:,1] = sortedpairs[:,1] & hashmask
idval = id_ << self.maxtimebits
for time_, hash_ in sortedpairs:
# How many already stored for this hash?
count = self.counts[hash_]
# Keep only the bottom part of the time value
#time_ %= mxtime
time_ &= timemask
# Keep only the bottom part of the hash value
hash_ &= hashmask
# Mixin with ID
val = (idval + time_) #.astype(np.uint32)
if count < self.depth:
# insert new val in next empty slot
#slot = self.counts[hash_]
self.table[hash_, count] = val
else:
# Choose a point at random
slot = random.randint(0, count)
# Only store if random slot wasn't beyond end
if slot < self.depth:
self.table[hash_, slot] = val
# Update record of number of vals in this bucket
self.counts[hash_] = count + 1
# Record how many hashes we (attempted to) save for this id
self.hashesperid[id_] += len(timehashpairs)
# Mark as unsaved
self.dirty = True
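    # Packing example (illustrative, not part of the original source): with the
    # default maxtime=16384, maxtimebits == 14, so an entry for id_=3 at
    # time_=100 is stored as (3 << 14) + 100 == 49252; get_entry() below
    # recovers id = 49252 >> 14 == 3 and time = 49252 & 0x3FFF == 100.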
def get_entry(self, hash_):
""" Return np.array of [id, time] entries
        associated with the given hash as rows.
"""
vals = self.table[hash_, :min(self.depth, self.counts[hash_])]
maxtimemask = (1 << self.matimebits) - 1
ids = vals >> self.maxtimebits
return np.c_[ids, vals & maxtimemask].astype(np.int32)
def get_hits_orig(self, hashes):
""" Return np.array of [id, delta_time, hash, time] rows
associated with each element in hashes array of [time, hash] rows.
This is the original version that actually calls get_entry().
"""
# Allocate to largest possible number of hits
hits = np.zeros((np.shape(hashes)[0]*self.depth, 4), np.int32)
nhits = 0
# Fill in
for time_, hash_ in hashes:
idstimes = self.get_entry(hash_)
nids = np.shape(idstimes)[0]
hitrows = nhits + np.arange(nids)
hits[hitrows, 0] = idstimes[:, 0]
hits[hitrows, 1] = idstimes[:, 1] - time_
hits[hitrows, 2] = hash_
hits[hitrows, 3] = time_
nhits += nids
# Discard the excess rows
hits.resize( (nhits, 4) )
return hits
def get_hits(self, hashes):
""" Return np.array of [id, delta_time, hash, time] rows
associated with each element in hashes array of [time, hash] rows.
This version has get_entry() inlined, it's about 30% faster.
"""
# Allocate to largest possible number of hits
nhashes = np.shape(hashes)[0]
hits = np.zeros((nhashes*self.depth, 4), np.int32)
nhits = 0
maxtimemask = (1 << self.maxtimebits) - 1
# Fill in
for ix in xrange(nhashes):
time_ = hashes[ix][0]
hash_ = hashes[ix][1]
nids = min(self.depth, self.counts[hash_])
tabvals = self.table[hash_, :nids]
hitrows = nhits + np.arange(nids)
hits[hitrows, 0] = tabvals >> self.maxtimebits
hits[hitrows, 1] = (tabvals & maxtimemask) - time_
hits[hitrows, 2] = hash_
hits[hitrows, 3] = time_
nhits += nids
# Discard the excess rows
hits.resize( (nhits, 4) )
return hits
def save(self, name, params=None):
""" Save hash table to file <name>,
including optional addition params
"""
# Merge in any provided params
if params:
for key in params:
self.params[key] = params[key]
with gzip.open(name, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
self.dirty = False
nhashes = sum(self.counts)
print("Saved fprints for", sum(n is not None for n in self.names),
"files (", nhashes, "hashes) to", name)
# Report the proportion of dropped hashes (overfull table)
dropped = nhashes - sum(np.minimum(self.depth, self.counts))
print("Dropped hashes=", dropped, "(%.2f%%)" % (
100.0*dropped/max(1, nhashes)))
def load(self, name):
""" Read either pklz or mat-format hash table file """
ext = os.path.splitext(name)[1]
if ext == '.mat':
params = self.load_matlab(name)
else:
params = self.load_pkl(name)
print("Read fprints for", sum(n is not None for n in self.names),
"files (", sum(self.counts), "hashes) from", name)
return params
def load_pkl(self, name):
""" Read hash table values from file <name>, return params """
with gzip.open(name, 'rb') as f:
temp = pickle.load(f)
assert temp.ht_version >= HT_COMPAT_VERSION
params = temp.params
self.hashbits = temp.hashbits
self.depth = temp.depth
if hasattr(temp, 'maxtimebits'):
self.maxtimebits = temp.maxtimebits
else:
self.maxtimebits = _bitsfor(temp.maxtime)
self.table = temp.table
self.counts = temp.counts
self.names = temp.names
self.hashesperid = np.array(temp.hashesperid).astype(np.uint32)
self.ht_version = temp.ht_version
self.dirty = False
return params
def load_matlab(self, name):
""" Read hash table from version saved by Matlab audfprint.
:params:
name : str
filename of .mat matlab fp dbase file
:returns:
params : dict
dictionary of parameters from the Matlab file including
'mat_version' : float
version read from Matlab file (must be >= 0.90)
'hoptime' : float
hoptime read from Matlab file (must be 0.02322)
'targetsr' : float
target sampling rate from Matlab file (must be 11025)
"""
mht = scipy.io.loadmat(name)
params = {}
params['mat_version'] = mht['HT_params'][0][0][-1][0][0]
assert params['mat_version'] >= 0.9
self.hashbits = _bitsfor(mht['HT_params'][0][0][0][0][0])
self.depth = mht['HT_params'][0][0][1][0][0]
self.maxtimebits = _bitsfor(mht['HT_params'][0][0][2][0][0])
params['hoptime'] = mht['HT_params'][0][0][3][0][0]
params['targetsr'] = mht['HT_params'][0][0][4][0][0]
params['nojenkins'] = mht['HT_params'][0][0][5][0][0]
# Python doesn't support the (pointless?) jenkins hashing
assert params['nojenkins']
self.table = mht['HashTable'].T
self.counts = mht['HashTableCounts'][0]
self.names = [str(val[0]) if len(val) > 0 else []
for val in mht['HashTableNames'][0]]
        self.hashesperid = np.array(mht['HashTableLengths'][0]).astype(np.uint32)
# Matlab uses 1-origin for the IDs in the hashes, so rather than
# rewrite them all, we shift the corresponding decode tables
# down one cell
self.names.insert(0, '')
self.hashesperid = np.append([0], self.hashesperid)
# Otherwise unmodified database
self.dirty = False
return params
def totalhashes(self):
""" Return the total count of hashes stored in the table """
return np.sum(self.counts)
def merge(self, ht):
""" Merge in the results from another hash table """
# All the items go into our table, offset by our current size
# Check compatibility
assert self.maxtimebits == ht.maxtimebits
ncurrent = len(self.names)
#size = len(self.counts)
self.names += ht.names
self.hashesperid = np.append(self.hashesperid, ht.hashesperid)
# All the table values need to be increased by the ncurrent
idoffset = (1 << self.maxtimebits) * ncurrent
for hash_ in np.nonzero(ht.counts)[0]:
allvals = np.r_[self.table[hash_, :self.counts[hash_]],
ht.table[hash_, :ht.counts[hash_]] + idoffset]
# ht.counts[hash_] may be more than the actual number of
# hashes we obtained, if ht.counts[hash_] > ht.depth.
# Subselect based on actual size.
if len(allvals) > self.depth:
# Our hash bin is filled: randomly subselect the
# hashes, and update count to accurately track the
# total number of hashes we've seen for this bin.
somevals = np.random.permutation(allvals)[:self.depth]
                self.table[hash_, :] = somevals
self.counts[hash_] += ht.counts[hash_]
else:
# Our bin isn't full. Store all the hashes, and
# accurately track how many values it contains. This
# may mean some of the hashes counted for full buckets
# in ht are "forgotten" if ht.depth < self.depth.
self.table[hash_, :len(allvals)] = allvals
self.counts[hash_] = len(allvals)
self.dirty = True
def name_to_id(self, name, add_if_missing=False):
""" Lookup name in the names list, or optionally add. """
if type(name) is str:
# lookup name or assign new
if name not in self.names:
if not add_if_missing:
raise ValueError("name " + name + " not found")
# Use an empty slot in the list if one exists.
try:
id_ = self.names.index(None)
self.names[id_] = name
self.hashesperid[id_] = 0
except ValueError:
self.names.append(name)
self.hashesperid = np.append(self.hashesperid, [0])
id_ = self.names.index(name)
else:
# we were passed in a numerical id
id_ = name
return id_
def remove(self, name):
""" Remove all data for named entity from the hash table. """
id_ = self.name_to_id(name)
# If we happen to be removing the first item (id_ == 0), this will
# match every empty entry in table. This is very inefficient, but
# it still works, and it's just one ID. We could have fixed it by
        # making the IDs written into the table start at 1, but that would mess
# up backwards compatibility.
id_in_table = (self.table >> self.maxtimebits) == id_
hashes_removed = 0
for hash_ in np.nonzero(np.max(id_in_table, axis=1))[0]:
vals = self.table[hash_, :self.counts[hash_]]
vals = [v for v, x in zip(vals, id_in_table[hash_])
if not x]
self.table[hash_] = np.hstack([vals,
np.zeros(self.depth - len(vals))])
# This will forget how many extra hashes we had dropped until now.
self.counts[hash_] = len(vals)
hashes_removed += np.sum(id_in_table[hash_])
self.names[id_] = None
self.hashesperid[id_] = 0
self.dirty = True
print("Removed", name, "(", hashes_removed, "hashes).")
def retrieve(self, name):
"""Return a list of (time, hash) pairs by finding them in the table."""
timehashpairs = []
id_ = self.name_to_id(name)
maxtimemask = (1 << self.maxtimebits) - 1
# Still a bug for id_ 0.
hashes_containing_id = np.nonzero(
np.max((self.table >> self.maxtimebits) == id_, axis=1))[0]
for hash_ in hashes_containing_id:
entries = self.table[hash_, :self.counts[hash_]]
matching_entries = np.nonzero(
(entries >> self.maxtimebits) == id_)[0]
times = (entries[matching_entries] & maxtimemask)
timehashpairs.extend([(time, hash_) for time in times])
return timehashpairs
def list(self, print_fn=None):
""" List all the known items. """
if not print_fn:
print_fn = print
for name, count in zip(self.names, self.hashesperid):
if name:
print_fn(name + " (" + str(count) + " hashes)")
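# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a hedged illustration of how
# the methods above compose.  It assumes two populated instances of this
# hash-table class, `ht_a` and `ht_b`, built elsewhere with the module's own
# constructor/loader; the variable and track names below are placeholders.
#
#   ht_a.merge(ht_b)                    # fold ht_b's entries into ht_a
#   print(ht_a.totalhashes())           # total hashes stored after the merge
#   pairs = ht_a.retrieve("track-01")   # [(time, hash), ...] for one name
#   ht_a.remove("track-01")             # drop that name's hashes entirely
#   ht_a.list()                         # print remaining names and counts
# ---------------------------------------------------------------------------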
|
|
#
# authen.py
# Mixin providing various approaches to authentication
#
import sys, os, time, base64, string, threading, json
import socket, grp
# Extra chars from base64
ALTCHARS="_+"
# Python has no isxdigit() for strings, so build our own set of hex digits
Xdigits = set(string.digits)
Xdigits.update( ['a', 'b', 'c', 'd', 'e', 'f'] )
Xdigits.update( ['A', 'B', 'C', 'D', 'E', 'F'] )
Xdigits = frozenset(Xdigits)
# Re-login every 30 days
LoginInterval = 60*60*24*30
# Non-method version for top-level servers
def do_parse_cookie(s):
global Xdigits
# Require "<user>-<cookie>"
tup = s.split('-')
if (len(tup) != 2) or any(not t for t in tup):
return None
# User is letters and digits
user,cookie = tup
    if any((not c.isalnum()) for c in user):
return None
# Cookie is a hex value
if any((c not in Xdigits) for c in cookie):
return None
return tup
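# Illustrative examples (hypothetical values) of what do_parse_cookie()
# accepts and rejects:
#   do_parse_cookie("alice-0123abcdef")  -> ['alice', '0123abcdef']
#   do_parse_cookie("alice0123abcdef")   -> None   (no "-" separator)
#   do_parse_cookie("al ice-0123abcdef") -> None   (user must be alphanumeric)
#   do_parse_cookie("alice-0123wxyz")    -> None   (cookie must be hex digits)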
# Wrapper for all the state of dealing with an
# account server
# When a server derives its authentication from an account server, it
# creates one of these to hold all the state of using that server.
class AccountServer(object):
def __init__(self, approot):
self.approot = approot
# Socket open to account server
self.acct_server = None
# Path name to our own Unix-domain socket (UDS)
self.path = None
# Unique index for an op
self.serial = 0L
# For pending operation, serial->mutex. For completed
# operation, serial->response.
self.ops = {}
# Barrier to when we're registered with accounts server
self.ready = False
# [http,https] ports for accounts server
self.ports = None
# Our PID, mostly for debugging
self.pid = os.getpid()
# Name of our service, for authorization
self.service = approot.config["service"]
# Common code to launch a request to the server,
# and optionally get back an answer
def send_server(self, op, req, await):
# Add on mandatory fields
req["op"] = op
req["reply-to"] = self.path
req["service"] = self.service
req["pid"] = self.pid
ser = req["serial"] = self.serial
self.serial += 1
if await:
# Register for this op to have a completion release
# We make sure it's in place before any completion
# could possibly return.
mutex = threading.Semaphore(0)
self.ops[ser] = (mutex, req)
# Send out the request
self.acct_server.send(json.dumps(req))
# No waiting
if not await:
return None
# Hold here for completion
mutex.acquire()
# Now we're back with a result. It's basically
# a copy of our request with a "response" field
        # (and, sometimes, a few other added fields)
tup = self.ops[ser]
del self.ops[ser]
return tup[1]
# Do initial registration, then endlessly serve
# operations back from the account server.
def serve_account(self):
# Decode HTTP/HTTPS config for port #'s used
approot = self.approot
cfg = approot.config
# Establish our own UDS endpoint
self.path = "/tmp/%s-%s" % \
(cfg["service"], cfg.get("domain", "chore"))
try:
# File from previous run
os.unlink(self.path)
except:
pass
s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
os.unlink(self.path)
except:
pass
s.bind(self.path)
os.chmod(self.path, 0660)
os.chown(self.path, os.getuid(),
grp.getgrnam(cfg.get("group", "chore")).gr_gid)
# Register with the account server
req = {"service": cfg["service"],
"port": approot.http_ports()}
sys.stderr.write("Start account server connection\n")
self.send_server("start", req, False)
# And stay here, servicing our own UDS port
while True:
sys.stderr.write(" account server loop\n")
# Next operation
buf = s.recv(65536)
sys.stderr.write("serve_account got %s\n" % (buf,))
# Decode and sanity check
try:
req = json.loads(buf)
except:
sys.stderr.write("Invalid UDS: %s\n" % (buf,))
continue
op = req.get("op")
if op is None:
sys.stderr.write("No operation: %s\n" % (buf,))
continue
# Is it a response?
if "result" in req:
ser = req["serial"]
# Our server ack's our registration
if op == "start":
self.ports = tuple(req["port"])
self.ready = True
continue
# Done with this op
# If they're waiting, release them and
# also swap for request version of JSON
# to the result.
if ser in self.ops:
# (mutex, request-dict)
tup = self.ops[ser]
# Update with response-dict
self.ops[ser] = (tup[0], req)
# And release the waiting thread
tup[0].release()
                # Response for a serial we aren't tracking; ignore it.
else:
sys.stderr.write("Account response not result: %s\n" %
(req,))
# Set up to use an account server
    # Connect to it, get its parameters, and spin up a thread
# to serve our side.
def setup_account_server(self):
# Connection to account server
if self.acct_server is None:
approot = self.approot
cfg = approot.config
dom = cfg.get("domain", "chore")
s = self.acct_server = \
socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sname = "/tmp/acct-%s" % (dom,)
sys.stderr.write("Connect to account server\n")
s.connect(sname)
# Our service thread
# (It'll do the initial registration of our app
# into the account server.)
t = threading.Thread(target=self.serve_account)
t.start()
        # Wait for confirmation with all of its information
if not self.ready:
sys.stderr.write(" sync wait for account server\n")
while not self.ready:
# Busy-wait until we're connected to the accounts server
time.sleep(0.1)
sys.stderr.write(" Account server ready\n")
# When they need to get logged in, go talk here
# This API serves a couple of purposes. It launches our
    # connection to the account server on first use. It also
# gates us to make sure that connection is established
# (with answering ack from account server which contains
# needed information). Only then does it return the
# [non-ssl, ssl] port pair so our caller can build a
# suitable redirection URL.
def redir_server(self):
# Hook to sync up with account server
self.setup_account_server()
# Here's how to talk to the account server
return self.ports
# The user has presented a cookie. It isn't cached & valid,
# so we need to ask our account server if it minted this
# cookie.
# Returns (user, cookie, expires-time), or None for failure
def verify_cookie(self, cookie):
# We're going to talk to'em, maybe for the first time
self.setup_account_server()
req = {"cookie": cookie}
res = self.send_server("cookie?", req, True)
if res["result"][0] == '?':
# Login failure
return None
# Per-user config included? Cache it.
user = res["user"]
if "config" in res:
self.approot.uconfig[user] = res["config"]
return (user, cookie, res["expires"])
# Shared authentication state across all connections
# This is part of the root of the app. Authentication-less
# apps don't need it, otherwise it provides ivars and
# methods for both local (file based) authentication, as
# well as system-wide account server authentication.
#
# cookies{} - Map from username to (expires, "cookie value")
# authentication[] - List of authentication methods to try
class Authen_Server_mixin(object):
def __init__(self):
self.cookies = {}
# Cookie name
self.login_token = "loginToken"
# No authentication by default. Init here
# then adds it on.
self.authentication = []
# Get cookie from cache, or filesystem
# Return (cookie, expires-tm) or None if there is no current
# cookie value.
# As a side effect, it detects expired cookies, scrubbing
# them as needed.
def load_cookie(self, user):
# In-memory cache
tup = self.cookies.get(user)
if tup is not None:
sys.stderr.write(" server cached cookie %r\n" % (user,))
ftm = tup[1]
else:
# Filesystem
try:
f = open("var/cookies/" + user, "r")
fcookie = f.readline().strip()
ftm = float(f.readline().strip())
f.close()
sys.stderr.write(" got filesystem cookie %r\n" % (user,))
tup = (fcookie, ftm)
self.cookies[user] = tup
except:
sys.stderr.write(" no saved cookie %r\n" % (user,))
return None
# Expired yet?
if ftm <= time.time():
sys.stderr.write(" cookie expired %r\n" % (user,))
# Scrub from filesystem
try:
os.unlink("var/cookies/" + user)
except:
pass
# And from cache
if user in self.cookies:
del self.cookies[user]
return None
# Here's our cookie with its expiration
return tup
# By default, each cookie for user U is stored in
# the file var/cookies/U. Once looked up, it is stored in
# the self.cookies dict as well.
def valid_cookie(self, user, cookie):
# Any saved cookie?
tup = self.load_cookie(user)
if tup is None:
return False
# Make sure they're talking about the current one
if cookie != tup[0]:
sys.stderr.write(" cookie does not match for %r\n" % (user,))
return False
# Good cookie, yum
sys.stderr.write(" cookie passes checks %r\n" % (user,))
return True
# We're going to use an account server, set up connection
def init_acct_server(self):
assert not hasattr(self, "acct_server")
srv = self.acct_server = AccountServer(self)
srv.setup_account_server()
# Authentication handling for WWW connections
# This is the counterpart to Authen_Server_mixin above; that one
# is common to all connections, this one is part of a particular
# connection. It supplies the methods a connection would use
# to handle authentication.
class Authen_mixin(object):
# Each mixin gets this hook
def __init__(self):
pass
# Default, don't disable authentication
def handle_noauth(self):
# By default, let them see the site icon
if (self.command == "GET") and (self.path == "/favicon.ico"):
return True
return False
# Apply current authentication
#
# Various return formats:
# "string" - 200 response with this string as body
# int,str - Given response # with str as error message
# True - Authen OK, let rest of WWW handling proceed
def authenticate(self):
srv = self.server
approot = srv.approot
# Web server level authentication?
authlist = srv.authentication
if authlist is None:
authlist = approot.authentication
# No authen, keep going
if not authlist:
return True
# Authen bypassed, keep going
if self.handle_noauth():
return True
# Find first authen method which produces an
# answer
for auth in authlist:
# The server has an unbound function, so invoke
# it as bound to us.
res = auth(self)
# The result is supplied
if res is not None:
if res is True:
# Hook to notice a user allowed on.
self.auth_ok()
return res
# Generic authen failed
return 401,None
# Default hook: no-op
# When somebody successfully authenticates, this gets called
def auth_ok(self):
pass
# Save cookie; record for ongoing use now, and
# try to save a cookie across server restarts
# Default (simple) implementation: in the filesystem
def save_cookie(self, user, cookie, tm):
srv = self.server
approot = srv.approot
approot.cookies[user] = (cookie, tm)
try:
# Save to filesystem too
f = open("var/cookies/" + user, "w")
f.write(cookie + "\n" + str(tm) + "\n")
f.close()
except:
# Oh, well. It's good for the session.
pass
# Supply default cookie expiration
def cookie_expires(self):
global LoginInterval
return LoginInterval
# Default, 16-byte cookies (128 bits)
def cookie_bytes(self):
return 16
# Place this cookie into our header
def cookie_header(self, user, cookie, expires):
srv = self.server
approot = srv.approot
# Header format of expiration time
tm = time.strftime("%a, %d %b %Y %T GMT", time.gmtime(expires))
if srv.ssl:
# Be more circumspect when using SSL; clear usually means
# you're using your media server at home. SSL means
# you're out in the Big Bad World.
extras = '; Secure; HttpOnly'
else:
# Note Secure on non-SSL just makes the cookie unusable,
# since you've given them a cookie they can never send
# back.
extras = ''
# The user's stored cookie is encoding "<user>-<cookie>" to
# tie account name together with cookie value.
self.extra_headers.append(
("Set-Cookie",
"%s=%s; Expires=%s; Path=/%s" %
(approot.login_token, user + "-" + cookie, tm, extras)) )
# We need a new cookie
# Fills in to approot.cookies{}
def new_cookie(self, user):
srv = self.server
approot = srv.approot
# If this is a new login (device, or local versus remote),
# use any existing cookie first.
tup = approot.load_cookie(user)
if tup is not None:
cookie,expires = tup
else:
# Generate hex representation of cookie value
cookie = ""
for x in xrange(self.cookie_bytes()):
cookie += ("%02x" % (ord(os.urandom(1)),))
# Expires some day
expires = time.time() + self.cookie_expires()
# Save it into short- and long-term storage
self.save_cookie(user, cookie, expires)
# Put it into our header
self.cookie_header(user, cookie, expires)
return cookie
# Parse the cookie from our expected format
# Returns (uname, cookie-part)
# (If the cookie doesn't encode a user account, this
# method can be overridden and a user value of
# None is fine.)
def parse_cookie(self, s):
return do_parse_cookie(s)
# Try to authenticate with a cookie
# Returns True on ok cookie authentication, False
# on failure.
def auth_cookie(self):
approot = self.server.approot
# See if they can satisfy us with an authentication cookie
sys.stderr.write("authen cookie\n")
cookie = self.get_cookie(approot.login_token)
if cookie is None:
sys.stderr.write(" no cookie\n")
return False
# Defensively parse cookie from header
tup = self.parse_cookie(cookie)
if tup is None:
sys.stderr.write(" malformed cookie %r\n" % (cookie,))
return False
user,cookie = tup
# Look up
if approot.valid_cookie(user, cookie):
sys.stderr.write(" cookie OK %s\n" % (user,))
self.user = str(user)
return True
sys.stderr.write(" cookie, but not known %s\n" % (user,))
return False
# Use this to always let them through
def auth_accept(self):
return True
# Generate a 401 error return along with the
# needed Authenticate header
def send401(self, msg=None):
if msg is None:
msg = "Authentication Required"
approot = self.server.approot
appname = approot.config.get("service", "Web Application Interface")
self.extra_headers.append(
('WWW-Authenticate', 'Basic realm="%s"' % (appname,)) )
return (401,msg)
# Use user/pass from accounts file
def auth_password(self):
sys.stderr.write("authen password\n")
# We're going to need a user/pass
auth = self.headers.getheader('Authorization')
if auth is None:
sys.stderr.write(" request authen\n")
return self.send401("Authentication required")
sys.stderr.write(" authen present\n")
# Do we accept it?
        auth = base64.b64decode(auth[6:], altchars=ALTCHARS)
        # Credentials decode to "<user>:<password>"; validate the shape
        # before unpacking so a malformed header can't raise ValueError.
        tup = auth.split(":", 1)
        if (len(tup) != 2) or (not tup[0]) or (not tup[1]):
            return self.send401(" Malformed authentication response")
        user,pw = tup
if not self.check_password(user, pw):
return self.send401(" Incorrect username or password")
# Generate new cookie, record
self.user = str(user)
sys.stderr.write(" password OK %r\n" % (user,))
cookie = self.new_cookie(user)
# Welcome on
return True
# Look up the username in a file, see if it's the right password
# Return True if they are OK, else 401 error
#
# This will often be overridden by the app, which will have
# its own password storage technique.
def check_password(self, user, pw):
srv = self.server
accts = srv.config["accounts"]
f = open(accts, "r")
for l in f:
l = l.strip()
if (not l) or l.startswith("#"):
continue
# <user> <pass> [ <app-specific>... ]
tup = l.split()
if len(tup) < 2:
continue
# For this user, do you know the password?
if tup[0] == user:
f.close()
if tup[1] == pw:
return True
return self.send401("Incorrect username or password\r\n")
f.close()
return self.send401("Incorrect username or password\r\n")
# Authentication type where we use a central account
# server for the host.
def auth_server(self):
# First see if we're already good to go
if self.auth_cookie():
return True
# See if they're presenting a cookie
srv = self.server
approot = srv.approot
aserver = approot.acct_server
cookie = self.get_cookie(approot.login_token)
if cookie is not None:
# Ask the account server if it's OK?
tup = aserver.verify_cookie(cookie)
if tup is not None:
# Cookie verified, consider them logged in
user,cookie,exptm = tup
self.user = str(user)
# Trim to non-user format of cookie
assert cookie.startswith(user + '-')
cookie = cookie[(len(user)+1):]
# Cache it
approot.cookies[user] = (cookie, exptm)
return True
# Go talk to the account server, log in, then we'll
# see you back here.
s_ports = aserver.redir_server()
desturl = "http%s://%s:%d" % \
(("s" if srv.ssl else ""),
self.headers.get("host").split(":")[0],
s_ports[1 if srv.ssl else 0])
res = self.gen_redir(desturl)
return res
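# ---------------------------------------------------------------------------
# Configuration sketch (illustrative only, not from the original module).
# authenticate() walks a list of unbound Authen_mixin methods and calls each
# one bound to the current connection, so a server choosing cookie-then-
# password authentication might be wired up roughly like this (the class
# name and surrounding setup are placeholders):
#
#   class MyServerRoot(Authen_Server_mixin, ...):
#       def __init__(self):
#           Authen_Server_mixin.__init__(self)
#           self.authentication = [Authen_mixin.auth_cookie,
#                                  Authen_mixin.auth_password]
#
# To delegate to a central account server instead, the list would hold
# Authen_mixin.auth_server and init_acct_server() would be called once at
# startup.
# ---------------------------------------------------------------------------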
|
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from mounts import Mounts
import datetime
import threading
import os
import time
import sys
import signal
import traceback
import threading
def thread_for_binary(self,args):
self.logger.log("Thread for binary is called",True)
time.sleep(5)
self.logger.log("Waited in thread for 5 seconds",True)
self.child = subprocess.Popen(args,stdout=subprocess.PIPE)
self.logger.log("Binary subprocess Created",True)
class FreezeError(object):
def __init__(self):
self.errorcode = None
self.fstype = None
self.path = None
def __str__(self):
        return "errorcode:" + str(self.errorcode) + " fstype:" + str(self.fstype) + " path:" + str(self.path)
class FreezeResult(object):
def __init__(self):
self.errors = []
def __str__(self):
error_str = ""
for error in self.errors:
error_str+=(str(error)) + "\n"
return error_str
class FreezeHandler(object):
def __init__(self,logger):
        # sig_handle valid values (0: nothing done, 1: froze successfully, 2: freeze failed)
self.sig_handle = 0
self.child= None
self.logger=logger
def sigusr1_handler(self,signal,frame):
self.logger.log('freezed',False)
self.sig_handle=1
def sigchld_handler(self,signal,frame):
self.logger.log('some child process terminated')
if(self.child is not None and self.child.poll() is not None):
self.logger.log("binary child terminated",True)
self.sig_handle=2
def reset_signals(self):
self.sig_handle = 0
self.child= None
def startproc(self,args):
binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])
binary_thread.start()
for i in range(0,33):
if(self.sig_handle==0):
self.logger.log("inside while with sig_handle "+str(self.sig_handle))
time.sleep(2)
else:
break
self.logger.log("Binary output for signal handled: "+str(self.sig_handle))
return self.sig_handle
def signal_receiver(self):
signal.signal(signal.SIGUSR1,self.sigusr1_handler)
signal.signal(signal.SIGCHLD,self.sigchld_handler)
class FsFreezer:
def __init__(self, patching, logger):
"""
"""
self.patching = patching
self.logger = logger
try:
self.mounts = Mounts(patching = self.patching, logger = self.logger)
except Exception as e:
errMsg='Failed to retrieve mount points, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg,True,'Warning')
self.logger.log(str(e), True)
self.mounts = None
self.frozen_items = set()
self.unfrozen_items = set()
self.freeze_handler = FreezeHandler(self.logger)
def should_skip(self, mount):
        if mount.fstype in ('ext3', 'ext4', 'xfs', 'btrfs') and mount.type != 'loop':
            return False
        else:
            return True
def freeze_safe(self,timeout):
self.root_seen = False
error_msg=''
try:
freeze_result = FreezeResult()
freezebin=os.path.join(os.getcwd(),os.path.dirname(__file__),"safefreeze/bin/safefreeze")
args=[freezebin,str(timeout)]
arg=[]
for mount in self.mounts.mounts:
self.logger.log("fsfreeze mount :" + str(mount.mount_point), True)
if(mount.mount_point == '/'):
self.root_seen = True
self.root_mount = mount
elif(mount.mount_point and not self.should_skip(mount)):
args.append(str(mount.mount_point))
if(self.root_seen):
args.append('/')
self.logger.log("arg : " + str(args),True)
self.freeze_handler.reset_signals()
self.freeze_handler.signal_receiver()
self.logger.log("proceeded for accepting signals", True)
self.logger.enforce_local_flag(False)
sig_handle=self.freeze_handler.startproc(args)
if(sig_handle != 1):
if (self.freeze_handler.child is not None):
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line,encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
error_msg="freeze failed for some mount"
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
except Exception as e:
self.logger.enforce_local_flag(True)
error_msg='freeze failed for some mount with exception, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
return freeze_result
def thaw_safe(self):
thaw_result = FreezeResult()
unable_to_sleep = False
if(self.freeze_handler.child is None):
self.logger.log("child already completed", True)
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
elif(self.freeze_handler.child.poll() is None):
self.logger.log("child process still running")
self.freeze_handler.child.send_signal(signal.SIGUSR1)
for i in range(0,30):
if(self.freeze_handler.child.poll() is None):
self.logger.log("child still running sigusr1 sent")
time.sleep(1)
else:
break
self.logger.enforce_local_flag(True)
self.logger.log("Binary output after process end: ", True)
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
if(self.freeze_handler.child.returncode!=0):
error_msg = 'snapshot result inconsistent as child returns with failure'
thaw_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
else:
self.logger.log("Binary output after process end when no thaw sent: ", True)
if(self.freeze_handler.child.returncode==2):
error_msg = 'Unable to execute sleep'
thaw_result.errors.append(error_msg)
unable_to_sleep = True
else:
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
self.logger.enforce_local_flag(True)
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
self.logger.log(error_msg, True, 'Error')
self.logger.enforce_local_flag(True)
return thaw_result, unable_to_sleep
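# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the intended freeze -> snapshot -> thaw
# call pattern for FsFreezer.  The `patching`, `logger` and take_snapshot()
# names are placeholders supplied by the surrounding extension code, not part
# of this module.
#
#   freezer = FsFreezer(patching=patching, logger=logger)
#   freeze_result = freezer.freeze_safe(timeout=60)
#   if not freeze_result.errors:
#       take_snapshot()                          # placeholder snapshot step
#   thaw_result, unable_to_sleep = freezer.thaw_safe()
# ---------------------------------------------------------------------------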
|
|
# Copyright (c) 2009-2011 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for using NMEA sentences.
"""
import datetime
from operator import attrgetter
from zope.interface import implementer
from twisted.positioning import base, nmea, ipositioning
from twisted.positioning.test.receiver import MockPositioningReceiver
from twisted.trial.unittest import TestCase
from twisted.positioning.base import Angles
# Sample sentences
GPGGA = '$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47'
GPRMC = '$GPRMC,123519,A,4807.038,N,01131.000,E,022.4,084.4,230394,003.1,W*6A'
GPGSA = '$GPGSA,A,3,19,28,14,18,27,22,31,39,,,,,1.7,1.0,1.3*34'
GPHDT = '$GPHDT,038.005,T*3B'
GPGLL = '$GPGLL,4916.45,N,12311.12,W,225444,A*31'
GPGLL_PARTIAL = '$GPGLL,3751.65,S,14507.36,E*77'
GPGSV_SINGLE = '$GPGSV,1,1,11,03,03,111,00,04,15,270,00,06,01,010,00,,,,*4b'
GPGSV_EMPTY_MIDDLE = '$GPGSV,1,1,11,03,03,111,00,,,,,,,,,13,06,292,00*75'
GPGSV_SEQ = GPGSV_FIRST, GPGSV_MIDDLE, GPGSV_LAST = """
$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74
$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74
$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D
""".split()
@implementer(ipositioning.INMEAReceiver)
class NMEATestReceiver(object):
"""
An NMEA receiver for testing.
Remembers the last sentence it has received.
"""
def __init__(self):
self.clear()
def clear(self):
"""
Forgets the received sentence (if any), by setting
C{self.receivedSentence} to C{None}.
"""
self.receivedSentence = None
def sentenceReceived(self, sentence):
self.receivedSentence = sentence
class CallbackTests(TestCase):
"""
Tests if the NMEA protocol correctly calls its sentence callback.
@ivar protocol: The NMEA protocol under test.
@type protocol: L{nmea.NMEAProtocol}
@ivar sentenceTypes: The set of sentence types of all sentences the test's
sentence callback function has been called with.
@type sentenceTypes: C{set}
"""
def setUp(self):
receiver = NMEATestReceiver()
self.protocol = nmea.NMEAProtocol(receiver, self._sentenceCallback)
self.sentenceTypes = set()
def _sentenceCallback(self, sentence):
"""
Remembers that a sentence of this type was fired.
"""
self.sentenceTypes.add(sentence.type)
def test_callbacksCalled(self):
"""
        The correct callbacks fire, and only those fire.
"""
sentencesByType = {
'GPGGA': ['$GPGGA*56'],
'GPGLL': ['$GPGLL*50'],
'GPGSA': ['$GPGSA*42'],
'GPGSV': ['$GPGSV*55'],
'GPHDT': ['$GPHDT*4f'],
'GPRMC': ['$GPRMC*4b']
}
for sentenceType, sentences in sentencesByType.iteritems():
for sentence in sentences:
self.protocol.lineReceived(sentence)
self.assertEqual(self.sentenceTypes, set([sentenceType]))
self.sentenceTypes.clear()
class BrokenSentenceCallbackTests(TestCase):
"""
Tests for broken NMEA sentence callbacks.
"""
def setUp(self):
receiver = NMEATestReceiver()
self.protocol = nmea.NMEAProtocol(receiver, self._sentenceCallback)
def _sentenceCallback(self, sentence):
"""
Raises C{AttributeError}.
"""
raise AttributeError("ERROR!!!")
def test_dontSwallowCallbackExceptions(self):
"""
An C{AttributeError} in the sentence callback of an C{NMEAProtocol}
doesn't get swallowed.
"""
lineReceived = self.protocol.lineReceived
self.assertRaises(AttributeError, lineReceived, '$GPGGA*56')
class SplitTest(TestCase):
"""
Checks splitting of NMEA sentences.
"""
def test_withChecksum(self):
"""
An NMEA sentence with a checksum gets split correctly.
"""
splitSentence = nmea._split("$GPGGA,spam,eggs*00")
self.assertEqual(splitSentence, ['GPGGA', 'spam', 'eggs'])
    def test_noChecksum(self):
"""
An NMEA sentence without a checksum gets split correctly.
"""
splitSentence = nmea._split("$GPGGA,spam,eggs*")
self.assertEqual(splitSentence, ['GPGGA', 'spam', 'eggs'])
class ChecksumTests(TestCase):
"""
NMEA sentence checksum verification tests.
"""
def test_valid(self):
"""
Sentences with valid checksums get validated.
"""
nmea._validateChecksum(GPGGA)
def test_missing(self):
"""
Sentences with missing checksums get validated.
"""
nmea._validateChecksum(GPGGA[:-2])
def test_invalid(self):
"""
Sentences with a bad checksum raise L{base.InvalidChecksum} when
attempting to validate them.
"""
validate = nmea._validateChecksum
bareSentence, checksum = GPGGA.split("*")
badChecksum = "%x" % (int(checksum, 16) + 1)
sentences = ["%s*%s" % (bareSentence, badChecksum)]
for s in sentences:
self.assertRaises(base.InvalidChecksum, validate, s)
class NMEAReceiverSetup(object):
"""
A mixin for tests that need an NMEA receiver (and a protocol attached to
it).
@ivar receiver: An NMEA receiver that remembers the last sentence.
@type receiver: L{NMEATestReceiver}
@ivar protocol: An NMEA protocol attached to the receiver.
@type protocol: L{twisted.positioning.nmea.NMEAProtocol}
"""
def setUp(self):
"""
Sets up an NMEA receiver.
"""
self.receiver = NMEATestReceiver()
self.protocol = nmea.NMEAProtocol(self.receiver)
class GSVSequenceTests(NMEAReceiverSetup, TestCase):
"""
Tests for the interpretation of GSV sequences.
"""
def test_firstSentence(self):
"""
The first sentence in a GSV sequence is correctly identified.
"""
self.protocol.lineReceived(GPGSV_FIRST)
sentence = self.receiver.receivedSentence
self.assertTrue(sentence._isFirstGSVSentence())
self.assertFalse(sentence._isLastGSVSentence())
def test_middleSentence(self):
"""
A sentence in the middle of a GSV sequence is correctly
identified (as being neither the last nor the first).
"""
self.protocol.lineReceived(GPGSV_MIDDLE)
sentence = self.receiver.receivedSentence
self.assertFalse(sentence._isFirstGSVSentence())
self.assertFalse(sentence._isLastGSVSentence())
def test_lastSentence(self):
"""
The last sentence in a GSV sequence is correctly identified.
"""
self.protocol.lineReceived(GPGSV_LAST)
sentence = self.receiver.receivedSentence
self.assertFalse(sentence._isFirstGSVSentence())
self.assertTrue(sentence._isLastGSVSentence())
class BogusSentenceTests(NMEAReceiverSetup, TestCase):
"""
Tests for verifying predictable failure for bogus NMEA sentences.
"""
def assertRaisesOnSentence(self, exceptionClass, sentence):
"""
Asserts that the protocol raises C{exceptionClass} when it receives
C{sentence}.
@param exceptionClass: The exception class expected to be raised.
@type exceptionClass: C{Exception} subclass
@param sentence: The (bogus) NMEA sentence.
@type sentence: C{str}
"""
self.assertRaises(exceptionClass, self.protocol.lineReceived, sentence)
def test_raiseOnUnknownSentenceType(self):
"""
Receiving a well-formed sentence of unknown type raises
C{ValueError}.
"""
self.assertRaisesOnSentence(ValueError, "$GPBOGUS*5b")
def test_raiseOnMalformedSentences(self):
"""
Receiving a malformed sentence raises L{base.InvalidSentence}.
"""
self.assertRaisesOnSentence(base.InvalidSentence, "GPBOGUS")
class NMEASentenceTests(NMEAReceiverSetup, TestCase):
"""
Tests for L{nmea.NMEASentence} objects.
"""
def test_repr(self):
"""
The C{repr} of L{nmea.NMEASentence} objects is correct.
"""
sentencesWithExpectedRepr = [
(GPGSA,
"<NMEASentence (GPGSA) {"
"dataMode: A, "
"fixType: 3, "
"horizontalDilutionOfPrecision: 1.0, "
"positionDilutionOfPrecision: 1.7, "
"usedSatellitePRN_0: 19, "
"usedSatellitePRN_1: 28, "
"usedSatellitePRN_2: 14, "
"usedSatellitePRN_3: 18, "
"usedSatellitePRN_4: 27, "
"usedSatellitePRN_5: 22, "
"usedSatellitePRN_6: 31, "
"usedSatellitePRN_7: 39, "
"verticalDilutionOfPrecision: 1.3"
"}>"),
]
for sentence, expectedRepr in sentencesWithExpectedRepr:
self.protocol.lineReceived(sentence)
received = self.receiver.receivedSentence
self.assertEqual(repr(received), expectedRepr)
class ParsingTests(NMEAReceiverSetup, TestCase):
"""
Tests if raw NMEA sentences get parsed correctly.
This doesn't really involve any interpretation, just turning ugly raw NMEA
representations into objects that are more pleasant to work with.
"""
def _parserTest(self, sentence, expected):
"""
Passes a sentence to the protocol and gets the parsed sentence from
the receiver. Then verifies that the parsed sentence contains the
expected data.
"""
self.protocol.lineReceived(sentence)
received = self.receiver.receivedSentence
self.assertEqual(expected, received._sentenceData)
def test_fullRMC(self):
"""
A full RMC sentence is correctly parsed.
"""
expected = {
'type': 'GPRMC',
'latitudeFloat': '4807.038',
'latitudeHemisphere': 'N',
'longitudeFloat': '01131.000',
'longitudeHemisphere': 'E',
'magneticVariation': '003.1',
'magneticVariationDirection': 'W',
'speedInKnots': '022.4',
'timestamp': '123519',
'datestamp': '230394',
'trueHeading': '084.4',
'dataMode': 'A',
}
self._parserTest(GPRMC, expected)
def test_fullGGA(self):
"""
A full GGA sentence is correctly parsed.
"""
expected = {
'type': 'GPGGA',
'altitude': '545.4',
'altitudeUnits': 'M',
'heightOfGeoidAboveWGS84': '46.9',
'heightOfGeoidAboveWGS84Units': 'M',
'horizontalDilutionOfPrecision': '0.9',
'latitudeFloat': '4807.038',
'latitudeHemisphere': 'N',
'longitudeFloat': '01131.000',
'longitudeHemisphere': 'E',
'numberOfSatellitesSeen': '08',
'timestamp': '123519',
'fixQuality': '1',
}
self._parserTest(GPGGA, expected)
def test_fullGLL(self):
"""
A full GLL sentence is correctly parsed.
"""
expected = {
'type': 'GPGLL',
'latitudeFloat': '4916.45',
'latitudeHemisphere': 'N',
'longitudeFloat': '12311.12',
'longitudeHemisphere': 'W',
'timestamp': '225444',
'dataMode': 'A',
}
self._parserTest(GPGLL, expected)
def test_partialGLL(self):
"""
A partial GLL sentence is correctly parsed.
"""
expected = {
'type': 'GPGLL',
'latitudeFloat': '3751.65',
'latitudeHemisphere': 'S',
'longitudeFloat': '14507.36',
'longitudeHemisphere': 'E',
}
self._parserTest(GPGLL_PARTIAL, expected)
def test_fullGSV(self):
"""
A full GSV sentence is correctly parsed.
"""
expected = {
'type': 'GPGSV',
'GSVSentenceIndex': '1',
'numberOfGSVSentences': '3',
'numberOfSatellitesSeen': '11',
'azimuth_0': '111',
'azimuth_1': '270',
'azimuth_2': '010',
'azimuth_3': '292',
'elevation_0': '03',
'elevation_1': '15',
'elevation_2': '01',
'elevation_3': '06',
'satellitePRN_0': '03',
'satellitePRN_1': '04',
'satellitePRN_2': '06',
'satellitePRN_3': '13',
'signalToNoiseRatio_0': '00',
'signalToNoiseRatio_1': '00',
'signalToNoiseRatio_2': '00',
'signalToNoiseRatio_3': '00',
}
self._parserTest(GPGSV_FIRST, expected)
def test_partialGSV(self):
"""
A partial GSV sentence is correctly parsed.
"""
expected = {
'type': 'GPGSV',
'GSVSentenceIndex': '3',
'numberOfGSVSentences': '3',
'numberOfSatellitesSeen': '11',
'azimuth_0': '067',
'azimuth_1': '311',
'azimuth_2': '244',
'elevation_0': '42',
'elevation_1': '14',
'elevation_2': '05',
'satellitePRN_0': '22',
'satellitePRN_1': '24',
'satellitePRN_2': '27',
'signalToNoiseRatio_0': '42',
'signalToNoiseRatio_1': '43',
'signalToNoiseRatio_2': '00',
}
self._parserTest(GPGSV_LAST, expected)
def test_fullHDT(self):
"""
A full HDT sentence is correctly parsed.
"""
expected = {
'type': 'GPHDT',
'trueHeading': '038.005',
}
self._parserTest(GPHDT, expected)
def test_typicalGSA(self):
"""
A typical GSA sentence is correctly parsed.
"""
expected = {
'type': 'GPGSA',
'dataMode': 'A',
'fixType': '3',
'usedSatellitePRN_0': '19',
'usedSatellitePRN_1': '28',
'usedSatellitePRN_2': '14',
'usedSatellitePRN_3': '18',
'usedSatellitePRN_4': '27',
'usedSatellitePRN_5': '22',
'usedSatellitePRN_6': '31',
'usedSatellitePRN_7': '39',
'positionDilutionOfPrecision': '1.7',
'horizontalDilutionOfPrecision': '1.0',
'verticalDilutionOfPrecision': '1.3',
}
self._parserTest(GPGSA, expected)
class FixUnitsTests(TestCase):
"""
Tests for the generic unit fixing method, L{nmea.NMEAAdapter._fixUnits}.
@ivar adapter: The NMEA adapter.
@type adapter: L{nmea.NMEAAdapter}
"""
def setUp(self):
self.adapter = nmea.NMEAAdapter(base.BasePositioningReceiver())
def test_noValueKey(self):
"""
Tests that when no C{valueKey} is provided, C{unitKey} is used, minus
C{"Units"} at the end.
"""
class FakeSentence(object):
"""
A fake sentence that just has a "foo" attribute.
"""
def __init__(self):
self.foo = 1
self.adapter.currentSentence = FakeSentence()
self.adapter._fixUnits(unitKey="fooUnits", unit="N")
self.assertNotEqual(self.adapter._sentenceData["foo"], 1)
def test_unitKeyButNoUnit(self):
"""
Tests that if a unit key is provided but the unit isn't, the unit is
automatically determined from the unit key.
"""
class FakeSentence(object):
"""
A fake sentence that just has "foo" and "fooUnits" attributes.
"""
def __init__(self):
self.foo = 1
self.fooUnits = "N"
self.adapter.currentSentence = FakeSentence()
self.adapter._fixUnits(unitKey="fooUnits")
self.assertNotEqual(self.adapter._sentenceData["foo"], 1)
def test_noValueKeyAndNoUnitKey(self):
"""
Tests that when a unit is specified but neither C{valueKey} nor
C{unitKey} is provided, C{ValueError} is raised.
"""
self.assertRaises(ValueError, self.adapter._fixUnits, unit="K")
class FixerTestMixin:
"""
Mixin for tests for the fixers on L{nmea.NMEAAdapter} that adapt
from NMEA-specific notations to generic Python objects.
@ivar adapter: The NMEA adapter.
@type adapter: L{nmea.NMEAAdapter}
"""
def setUp(self):
self.adapter = nmea.NMEAAdapter(base.BasePositioningReceiver())
def _fixerTest(self, sentenceData, expected=None, exceptionClass=None):
"""
A generic adapter fixer test.
Creates a sentence from the C{sentenceData} and sends that to the
adapter. If C{exceptionClass} is not passed, this is assumed to work,
and C{expected} is compared with the adapter's internal state.
Otherwise, passing the sentence to the adapter is checked to raise
C{exceptionClass}.
@param sentenceData: Raw sentence content.
@type sentenceData: C{dict} mapping C{str} to C{str}
@param expected: The expected state of the adapter.
@type expected: C{dict} or C{None}
@param exceptionClass: The exception to be raised by the adapter.
@type exceptionClass: subclass of C{Exception}
"""
sentence = nmea.NMEASentence(sentenceData)
def receiveSentence():
self.adapter.sentenceReceived(sentence)
if exceptionClass is None:
receiveSentence()
self.assertEqual(self.adapter._state, expected)
else:
self.assertRaises(exceptionClass, receiveSentence)
self.adapter.clear()
class TimestampFixerTests(FixerTestMixin, TestCase):
"""
Tests conversion from NMEA timestamps to C{datetime.time} objects.
"""
def test_simple(self):
"""
A simple timestamp is converted correctly.
"""
data = {'timestamp': '123456'} # 12:34:56Z
expected = {'_time': datetime.time(12, 34, 56)}
self._fixerTest(data, expected)
def test_broken(self):
"""
A broken timestamp raises C{ValueError}.
"""
badTimestamps = '993456', '129956', '123499'
for t in badTimestamps:
self._fixerTest({'timestamp': t}, exceptionClass=ValueError)
class DatestampFixerTests(FixerTestMixin, TestCase):
def test_defaultYearThreshold(self):
"""
The default year threshold is 1980.
"""
self.assertEqual(self.adapter.yearThreshold, 1980)
def test_beforeThreshold(self):
"""
Dates before the threshold are interpreted as being in the century
after the threshold. (Since the threshold is the earliest possible
date.)
"""
datestring, date = '010115', datetime.date(2015, 1, 1)
self._fixerTest({'datestamp': datestring}, {'_date': date})
def test_afterThreshold(self):
"""
Dates after the threshold are interpreted as being in the same century
as the threshold.
"""
datestring, date = '010195', datetime.date(1995, 1, 1)
self._fixerTest({'datestamp': datestring}, {'_date': date})
def test_invalidMonth(self):
"""
A datestring with an invalid month (> 12) raises C{ValueError}.
"""
self._fixerTest({'datestamp': '011301'}, exceptionClass=ValueError)
def test_invalidDay(self):
"""
A datestring with an invalid day (more days than there are in that
month) raises C{ValueError}.
"""
self._fixerTest({'datestamp': '320101'}, exceptionClass=ValueError)
self._fixerTest({'datestamp': '300201'}, exceptionClass=ValueError)
def _nmeaFloat(degrees, minutes):
"""
Builds an NMEA float representation for a given angle in degrees and
decimal minutes.
@param degrees: The integer degrees for this angle.
@type degrees: C{int}
@param minutes: The decimal minutes value for this angle.
@type minutes: C{float}
@return: The NMEA float representation for this angle.
@rtype: C{str}
"""
return "%i%0.3f" % (degrees, minutes)
def _coordinateSign(hemisphere):
"""
Return the sign of a coordinate.
This is C{1} if the coordinate is in the northern or eastern hemispheres,
C{-1} otherwise.
@param hemisphere: NMEA shorthand for the hemisphere. One of "NESW".
@type hemisphere: C{str}
@return: The sign of the coordinate value.
@rtype: C{int}
"""
return 1 if hemisphere in "NE" else -1
def _coordinateType(hemisphere):
"""
Return the type of a coordinate.
This is L{Angles.LATITUDE} if the coordinate is in the northern or
southern hemispheres, L{Angles.LONGITUDE} otherwise.
@param hemisphere: NMEA shorthand for the hemisphere. One of "NESW".
@type hemisphere: C{str}
@return: The type of the coordinate (L{Angles.LATITUDE} or
L{Angles.LONGITUDE})
"""
return Angles.LATITUDE if hemisphere in "NS" else Angles.LONGITUDE
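# Illustrative values for the helpers above: _nmeaFloat(10, 30.0) produces
# "1030.000", _coordinateSign("S") is -1, and _coordinateType("S") is
# Angles.LATITUDE (whereas "E"/"W" map to Angles.LONGITUDE).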
class CoordinateFixerTests(FixerTestMixin, TestCase):
"""
Tests turning NMEA coordinate notations into something more pleasant.
"""
def test_north(self):
"""
NMEA coordinate representations in the northern hemisphere
convert correctly.
"""
sentenceData = {"latitudeFloat": "1030.000", "latitudeHemisphere": "N"}
state = {"latitude": base.Coordinate(10.5, Angles.LATITUDE)}
self._fixerTest(sentenceData, state)
def test_south(self):
"""
NMEA coordinate representations in the southern hemisphere
convert correctly.
"""
sentenceData = {"latitudeFloat": "1030.000", "latitudeHemisphere": "S"}
state = {"latitude": base.Coordinate(-10.5, Angles.LATITUDE)}
self._fixerTest(sentenceData, state)
def test_east(self):
"""
NMEA coordinate representations in the eastern hemisphere
convert correctly.
"""
sentenceData = {"longitudeFloat": "1030.000", "longitudeHemisphere": "E"}
state = {"longitude": base.Coordinate(10.5, Angles.LONGITUDE)}
self._fixerTest(sentenceData, state)
def test_west(self):
"""
NMEA coordinate representations in the western hemisphere
convert correctly.
"""
sentenceData = {"longitudeFloat": "1030.000", "longitudeHemisphere": "W"}
state = {"longitude": base.Coordinate(-10.5, Angles.LONGITUDE)}
self._fixerTest(sentenceData, state)
def test_badHemisphere(self):
"""
NMEA coordinate representations for nonexistent hemispheres
raise C{ValueError} when you attempt to parse them.
"""
sentenceData = {'longitudeHemisphere': 'Q'}
self._fixerTest(sentenceData, exceptionClass=ValueError)
def test_badHemisphereSign(self):
"""
        NMEA coordinate representation parsing fails predictably
when you pass nonexistent coordinate types (not latitude or
longitude).
"""
getSign = lambda: self.adapter._getHemisphereSign("BOGUS_VALUE")
self.assertRaises(ValueError, getSign)
class AltitudeFixerTests(FixerTestMixin, TestCase):
"""
Tests that NMEA representations of altitudes are correctly converted.
"""
def test_fixAltitude(self):
"""
The NMEA representation of an altitude (above mean sea level)
is correctly converted.
"""
key, value = 'altitude', '545.4'
altitude = base.Altitude(float(value))
self._fixerTest({key: value}, {key: altitude})
def test_heightOfGeoidAboveWGS84(self):
"""
The NMEA representation of an altitude of the geoid (above the
WGS84 reference level) is correctly converted.
"""
key, value = 'heightOfGeoidAboveWGS84', '46.9'
altitude = base.Altitude(float(value))
self._fixerTest({key: value}, {key: altitude})
class SpeedFixerTests(FixerTestMixin, TestCase):
"""
Tests that NMEA representations of speeds are correctly converted.
"""
def test_speedInKnots(self):
"""
Speeds reported in knots correctly get converted to meters per
second.
"""
key, value, targetKey = "speedInKnots", "10", "speed"
speed = base.Speed(float(value) * base.MPS_PER_KNOT)
self._fixerTest({key: value}, {targetKey: speed})
class VariationFixerTests(FixerTestMixin, TestCase):
"""
Tests if the absolute values of magnetic variations on the heading
and their sign get combined correctly, and if that value gets
combined with a heading correctly.
"""
def test_west(self):
"""
Tests westward (negative) magnetic variation.
"""
variation, direction = "1.34", "W"
heading = base.Heading.fromFloats(variationValue=-1*float(variation))
sentenceData = {'magneticVariation': variation,
'magneticVariationDirection': direction}
self._fixerTest(sentenceData, {'heading': heading})
def test_east(self):
"""
Tests eastward (positive) magnetic variation.
"""
variation, direction = "1.34", "E"
heading = base.Heading.fromFloats(variationValue=float(variation))
sentenceData = {'magneticVariation': variation,
'magneticVariationDirection': direction}
self._fixerTest(sentenceData, {'heading': heading})
def test_withHeading(self):
"""
Variation values get combined with headings correctly.
"""
trueHeading, variation, direction = "123.12", "1.34", "E"
sentenceData = {'trueHeading': trueHeading,
'magneticVariation': variation,
'magneticVariationDirection': direction}
heading = base.Heading.fromFloats(float(trueHeading),
variationValue=float(variation))
self._fixerTest(sentenceData, {'heading': heading})
class PositionErrorFixerTests(FixerTestMixin, TestCase):
"""
Position errors in NMEA are passed as dilutions of precision (DOP). This
is a measure relative to some specified value of the GPS device as its
"reference" precision. Unfortunately, there are very few ways of figuring
this out from just the device (sans manual).
There are two basic DOP values: vertical and horizontal. HDOP tells you
how precise your location is on the face of the earth (pretending it's
flat, at least locally). VDOP tells you how precise your altitude is
known. PDOP (position DOP) is a dependent value defined as the Euclidean
norm of those two, and gives you a more generic "goodness of fix" value.
"""
def test_simple(self):
self._fixerTest(
{'horizontalDilutionOfPrecision': '11'},
{'positionError': base.PositionError(hdop=11.)})
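    # A worked example of the PDOP relation described above (illustrative
    # numbers only): HDOP = 3.0 and VDOP = 4.0 give
    # PDOP = sqrt(3.0**2 + 4.0**2) = 5.0.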
def test_mixing(self):
pdop, hdop, vdop = "1", "1", "1"
positionError = base.PositionError(pdop=float(pdop),
hdop=float(hdop),
vdop=float(vdop))
sentenceData = {'positionDilutionOfPrecision': pdop,
'horizontalDilutionOfPrecision': hdop,
'verticalDilutionOfPrecision': vdop}
self._fixerTest(sentenceData, {"positionError": positionError})
class ValidFixTests(FixerTestMixin, TestCase):
"""
Tests that data reported from a valid fix is used.
"""
def test_GGA(self):
"""
GGA data with a valid fix is used.
"""
sentenceData = {'type': 'GPGGA',
'altitude': '545.4',
'fixQuality': nmea.GPGGAFixQualities.GPS_FIX}
expectedState = {'altitude': base.Altitude(545.4)}
self._fixerTest(sentenceData, expectedState)
def test_GLL(self):
"""
GLL data with a valid data mode is used.
"""
sentenceData = {'type': 'GPGLL',
'altitude': '545.4',
'dataMode': nmea.GPGLLGPRMCFixQualities.ACTIVE}
expectedState = {'altitude': base.Altitude(545.4)}
self._fixerTest(sentenceData, expectedState)
class InvalidFixTests(FixerTestMixin, TestCase):
"""
Tests that data being reported from a bad or incomplete fix isn't
used. Although the specification dictates that GPSes shouldn't produce
NMEA sentences with real-looking values for altitude or position in them
unless they have at least some semblance of a GPS fix, this is widely
ignored.
"""
def _invalidFixTest(self, sentenceData):
"""
Sentences with an invalid fix or data mode result in empty
state (ie, the data isn't used).
"""
self._fixerTest(sentenceData, {})
def test_GGA(self):
"""
GGA sentence data is unused when there is no fix.
"""
sentenceData = {'type': 'GPGGA',
'altitude': '545.4',
'fixQuality': nmea.GPGGAFixQualities.INVALID_FIX}
self._invalidFixTest(sentenceData)
def test_GLL(self):
"""
GLL sentence data is unused when the data is flagged as void.
"""
sentenceData = {'type': 'GPGLL',
'altitude': '545.4',
'dataMode': nmea.GPGLLGPRMCFixQualities.VOID}
self._invalidFixTest(sentenceData)
def test_badGSADataMode(self):
"""
GSA sentence data is not used when there is no GPS fix, but
the data mode claims the data is "active". Some GPSes do do
this, unfortunately, and that means you shouldn't use the
data.
"""
sentenceData = {'type': 'GPGSA',
'altitude': '545.4',
'dataMode': nmea.GPGLLGPRMCFixQualities.ACTIVE,
'fixType': nmea.GPGSAFixTypes.GSA_NO_FIX}
self._invalidFixTest(sentenceData)
def test_badGSAFixType(self):
"""
GSA sentence data is not used when the fix claims to be valid
(albeit only 2D), but the data mode says the data is void.
Some GPSes do do this, unfortunately, and that means you
shouldn't use the data.
"""
sentenceData = {'type': 'GPGSA',
'altitude': '545.4',
'dataMode': nmea.GPGLLGPRMCFixQualities.VOID,
'fixType': nmea.GPGSAFixTypes.GSA_2D_FIX}
self._invalidFixTest(sentenceData)
def test_badGSADataModeAndFixType(self):
"""
        GSA sentence data is not used when neither the fix nor the data
mode is any good.
"""
sentenceData = {'type': 'GPGSA',
'altitude': '545.4',
'dataMode': nmea.GPGLLGPRMCFixQualities.VOID,
'fixType': nmea.GPGSAFixTypes.GSA_NO_FIX}
self._invalidFixTest(sentenceData)
class NMEAReceiverTest(TestCase):
"""
Tests for the NMEA receiver.
"""
def setUp(self):
self.receiver = MockPositioningReceiver()
self.adapter = nmea.NMEAAdapter(self.receiver)
self.protocol = nmea.NMEAProtocol(self.adapter)
def test_onlyFireWhenCurrentSentenceHasNewInformation(self):
"""
If the current sentence does not contain any new fields for a
particular callback, that callback is not called; even if all
necessary information is still in the state from one or more
previous messages.
"""
self.protocol.lineReceived(GPGGA)
GPGGACallbacks = set(['positionReceived',
'positionErrorReceived',
'altitudeReceived'])
self.assertEqual(set(self.receiver.called.keys()), GPGGACallbacks)
self.receiver.clear()
self.assertNotEqual(self.adapter._state, {})
        # GPHDT contains heading information but not position,
# altitude or anything like that; but that information is
# still in the state.
self.protocol.lineReceived(GPHDT)
GPHDTCallbacks = set(['headingReceived'])
self.assertEqual(set(self.receiver.called.keys()), GPHDTCallbacks)
def _receiverTest(self, sentences, expectedFired=(), extraTest=None):
"""
A generic test for NMEA receiver behavior.
@param sentences: The sequence of sentences to simulate receiving.
@type sentences: iterable of C{str}
@param expectedFired: The names of the callbacks expected to fire.
@type expectedFired: iterable of C{str}
@param extraTest: An optional extra test hook.
@type extraTest: nullary callable
"""
for sentence in sentences:
self.protocol.lineReceived(sentence)
actuallyFired = self.receiver.called.keys()
self.assertEqual(set(actuallyFired), set(expectedFired))
if extraTest is not None:
extraTest()
self.receiver.clear()
self.adapter.clear()
def test_positionErrorUpdateAcrossStates(self):
"""
The positioning error is updated across multiple states.
"""
sentences = [GPGSA] + GPGSV_SEQ
callbacksFired = ['positionErrorReceived', 'beaconInformationReceived']
def _getIdentifiers(beacons):
return sorted(map(attrgetter("identifier"), beacons))
def checkBeaconInformation():
beaconInformation = self.adapter._state['beaconInformation']
seenIdentifiers = _getIdentifiers(beaconInformation.seenBeacons)
expected = [3, 4, 6, 13, 14, 16, 18, 19, 22, 24, 27]
self.assertEqual(seenIdentifiers, expected)
usedIdentifiers = _getIdentifiers(beaconInformation.usedBeacons)
# These are not actually all the PRNs in the sample GPGSA:
# only the ones also reported by the GPGSV sequence. This
# is just because the sample data doesn't come from the
# same reporting cycle of a GPS device.
self.assertEqual(usedIdentifiers, [14, 18, 19, 22, 27])
self._receiverTest(sentences, callbacksFired, checkBeaconInformation)
def test_emptyMiddleGSV(self):
"""
A GSV sentence with empty entries in any position does not mean that
entries in subsequent positions of the same GSV sentence are ignored.
"""
sentences = [GPGSV_EMPTY_MIDDLE]
callbacksFired = ['beaconInformationReceived']
def checkBeaconInformation():
beaconInformation = self.adapter._state['beaconInformation']
seenBeacons = beaconInformation.seenBeacons
self.assertEqual(len(seenBeacons), 2)
self.assertIn(13, [b.identifier for b in seenBeacons])
self._receiverTest(sentences, callbacksFired, checkBeaconInformation)
def test_GGASentences(self):
"""
A sequence of GGA sentences fires C{positionReceived},
C{positionErrorReceived} and C{altitudeReceived}.
"""
sentences = [GPGGA]
callbacksFired = ['positionReceived',
'positionErrorReceived',
'altitudeReceived']
self._receiverTest(sentences, callbacksFired)
def test_GGAWithDateInState(self):
"""
When receiving a GPGGA sentence and a date was already in the
state, the new time (from the GPGGA sentence) is combined with
that date.
"""
self.adapter._state["_date"] = datetime.date(2014, 1, 1)
sentences = [GPGGA]
callbacksFired = ['positionReceived',
'positionErrorReceived',
'altitudeReceived',
'timeReceived']
self._receiverTest(sentences, callbacksFired)
def test_RMCSentences(self):
"""
A sequence of RMC sentences fires C{positionReceived},
C{speedReceived}, C{headingReceived} and C{timeReceived}.
"""
sentences = [GPRMC]
callbacksFired = ['headingReceived',
'speedReceived',
'positionReceived',
'timeReceived']
self._receiverTest(sentences, callbacksFired)
def test_GSVSentences(self):
"""
A complete sequence of GSV sentences fires
C{beaconInformationReceived}.
"""
sentences = [GPGSV_FIRST, GPGSV_MIDDLE, GPGSV_LAST]
callbacksFired = ['beaconInformationReceived']
def checkPartialInformation():
self.assertNotIn('_partialBeaconInformation', self.adapter._state)
self._receiverTest(sentences, callbacksFired, checkPartialInformation)
def test_emptyMiddleEntriesGSVSequence(self):
"""
A complete sequence of GSV sentences with empty entries in the
middle still fires C{beaconInformationReceived}.
"""
sentences = [GPGSV_EMPTY_MIDDLE]
self._receiverTest(sentences, ["beaconInformationReceived"])
def test_incompleteGSVSequence(self):
"""
An incomplete sequence of GSV sentences does not fire any callbacks.
"""
sentences = [GPGSV_FIRST]
self._receiverTest(sentences)
def test_singleSentenceGSVSequence(self):
"""
The parser does not fail badly when the sequence consists of
only one sentence (but is otherwise complete).
"""
sentences = [GPGSV_SINGLE]
self._receiverTest(sentences, ["beaconInformationReceived"])
def test_GLLSentences(self):
"""
GLL sentences fire C{positionReceived}.
"""
sentences = [GPGLL_PARTIAL, GPGLL]
self._receiverTest(sentences, ['positionReceived'])
def test_HDTSentences(self):
"""
HDT sentences fire C{headingReceived}.
"""
sentences = [GPHDT]
self._receiverTest(sentences, ['headingReceived'])
def test_mixedSentences(self):
"""
A mix of sentences fires the correct callbacks.
"""
sentences = [GPRMC, GPGGA]
callbacksFired = ['altitudeReceived',
'speedReceived',
'positionReceived',
'positionErrorReceived',
'timeReceived',
'headingReceived']
def checkTime():
expectedDateTime = datetime.datetime(1994, 3, 23, 12, 35, 19)
self.assertEqual(self.adapter._state['time'], expectedDateTime)
self._receiverTest(sentences, callbacksFired, checkTime)
def test_lotsOfMixedSentences(self):
"""
Sends an entire gamut of sentences and verifies the
appropriate callbacks fire. These are more than you'd expect
from your average consumer GPS device. They have most of the
important information, including beacon information and
visibility.
"""
sentences = [GPGSA] + GPGSV_SEQ + [GPRMC, GPGGA, GPGLL]
callbacksFired = ['headingReceived',
'beaconInformationReceived',
'speedReceived',
'positionReceived',
'timeReceived',
'altitudeReceived',
'positionErrorReceived']
self._receiverTest(sentences, callbacksFired)
|
|
"""Tests for eval_lib.work_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import unittest
from six import assertCountEqual
from six import itervalues
from eval_lib import work_data
from eval_lib.tests import fake_cloud_client
TEST_WORK_TYPE_ENTITY_ID = 'AllWork'
class WorkPiecesBaseTest(unittest.TestCase):
def setUp(self):
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
self.work1 = {'submission_id': 's1',
'output_adversarial_batch_id': 'o1',
'claimed_worker_id': 'worker9999',
'claimed_worker_start_time': -1,
'is_completed': True}
self.work2 = {'submission_id': 's2',
'output_adversarial_batch_id': 'o2',
'claimed_worker_id': None,
'claimed_worker_start_time': None,
'is_completed': False}
def reset_work_pieces(self):
self.work_pieces = work_data.WorkPiecesBase(self.datastore_client,
TEST_WORK_TYPE_ENTITY_ID)
def test_is_unclaimed(self):
# completed work considered claimed
self.assertFalse(work_data.is_unclaimed(self.work1))
# not completed, not claimed work
self.assertTrue(work_data.is_unclaimed(self.work2))
# claimed but not completed work
self.work2['claimed_worker_id'] = 'some_worker'
self.work2['claimed_worker_start_time'] = work_data.get_integer_time()
self.assertFalse(work_data.is_unclaimed(self.work2))
# work claimed too long ago considered unclaimed now
self.work2['claimed_worker_start_time'] = (
work_data.get_integer_time() - work_data.MAX_PROCESSING_TIME - 1)
self.assertTrue(work_data.is_unclaimed(self.work2))
def test_write_to_datastore(self):
self.reset_work_pieces()
self.work_pieces.work['w1'] = self.work1
self.work_pieces.work['w2'] = self.work2
self.work_pieces.write_all_to_datastore()
# verify content of the datastore
parent_key = fake_cloud_client.FakeDatastoreKey(work_data.KIND_WORK_TYPE,
TEST_WORK_TYPE_ENTITY_ID)
assertCountEqual(
self, [fake_cloud_client.make_entity(parent_key)],
self.datastore_client.query_fetch(kind=work_data.KIND_WORK_TYPE))
entity1 = fake_cloud_client.make_entity(
fake_cloud_client.FakeDatastoreKey(
work_data.KIND_WORK, 'w1', parent=parent_key))
entity1.update(self.work1)
entity2 = fake_cloud_client.make_entity(
fake_cloud_client.FakeDatastoreKey(
work_data.KIND_WORK, 'w2', parent=parent_key))
entity2.update(self.work2)
assertCountEqual(
self, [entity1, entity2],
self.datastore_client.query_fetch(kind=work_data.KIND_WORK))
def test_read_from_datastore(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
self.reset_work_pieces()
self.work_pieces.read_all_from_datastore()
# verify data
self.assertDictEqual({'w10': self.work1, 'w20': self.work2},
self.work_pieces.work)
def test_is_all_work_completed(self):
self.reset_work_pieces()
# empty set of work is considered completed
self.assertTrue(self.work_pieces.is_all_work_competed())
# one completed piece of work - all work completed
self.work_pieces.work['w11'] = copy.deepcopy(self.work1)
self.assertTrue(self.work_pieces.is_all_work_competed())
# two completed pieces of work - all work completed
self.work_pieces.work['w12'] = copy.deepcopy(self.work1)
self.assertTrue(self.work_pieces.is_all_work_competed())
# two completed and one incomplete pieces of work - work not completed
self.work_pieces.work['w2'] = copy.deepcopy(self.work2)
self.assertFalse(self.work_pieces.is_all_work_competed())
def test_read_undone_from_datastore(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
self.reset_work_pieces()
# return value is None because sharding is not used
self.assertIsNone(self.work_pieces.read_undone_from_datastore())
# Only work with ID 'w20' is undone
self.assertDictEqual({'w20': self.work2}, self.work_pieces.work)
def test_read_undone_from_datastore_same_shards(self):
self.reset_work_pieces()
self.work1['shard_id'] = 1
self.work_pieces.work['w10'] = self.work1
self.work2['shard_id'] = 2
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
self.reset_work_pieces()
# return value is ID of the shard with undone work
self.assertEqual(2, self.work_pieces.read_undone_from_datastore(
shard_id=2, num_shards=3))
# Only work with ID 'w20' is undone
self.assertDictEqual({'w20': self.work2}, self.work_pieces.work)
def test_read_undone_from_datastore_different_shards(self):
self.reset_work_pieces()
self.work1['shard_id'] = 1
self.work_pieces.work['w10'] = self.work1
self.work2['shard_id'] = 2
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
self.reset_work_pieces()
# return value is ID of the shard with undone work
self.assertEqual(2, self.work_pieces.read_undone_from_datastore(
shard_id=1, num_shards=3))
# Only work with ID 'w20' is undone
self.assertDictEqual({'w20': self.work2}, self.work_pieces.work)
def test_try_pick_piece_of_work_simple(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
work_id = self.work_pieces.try_pick_piece_of_work('worker0')
self.assertEqual('w20', work_id)
self.reset_work_pieces()
self.work_pieces.read_all_from_datastore()
self.assertEqual('worker0',
self.work_pieces.work['w20']['claimed_worker_id'])
def test_try_pick_piece_of_work_all_completed(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work_pieces.work['w20']['is_completed'] = True
self.work_pieces.write_all_to_datastore()
work_id = self.work_pieces.try_pick_piece_of_work('worker0')
self.assertIsNone(work_id)
def test_try_pick_piece_of_work_already_claimed(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work2['claimed_worker_id'] = 'other_worker'
self.work2['claimed_worker_start_time'] = work_data.get_integer_time()
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
work_id = self.work_pieces.try_pick_piece_of_work('worker0')
# if work is claimed by another worker then it won't be picked
self.assertIsNone(work_id)
def test_try_pick_piece_of_work_claimed_long_ago(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work2['claimed_worker_id'] = 'other_worker'
self.work2['claimed_worker_start_time'] = (
work_data.get_integer_time() - work_data.MAX_PROCESSING_TIME * 2)
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
work_id = self.work_pieces.try_pick_piece_of_work('worker0')
# if work is claimed by another worker, but it happened some time ago
# then work will be claimed
self.assertEqual('w20', work_id)
def test_try_pick_piece_of_work_concurrent_update(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
# any concurrent change in the entity will cause transaction to fail
def transaction_hook(client):
key = client.key('WorkType', TEST_WORK_TYPE_ENTITY_ID, 'Work', 'w20')
client.entities[key]['output_adversarial_batch_id'] = 'o3'
self.datastore_client.set_transaction_hook(transaction_hook)
work_id = self.work_pieces.try_pick_piece_of_work('worker0')
self.assertIsNone(work_id)
def test_try_pick_piece_of_work_concurrent_update_of_other(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work_pieces.write_all_to_datastore()
# concurrent change in entity which is not touched by the transaction
# won't prevent transaction from completing
def transaction_hook(client):
key = client.key('WorkType', TEST_WORK_TYPE_ENTITY_ID, 'Work', 'w10')
client.entities[key]['output_adversarial_batch_id'] = 'o3'
self.datastore_client.set_transaction_hook(transaction_hook)
work_id = self.work_pieces.try_pick_piece_of_work('worker0')
self.assertEqual('w20', work_id)
def test_update_work_as_completed(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work2['claimed_worker_id'] = 'this_worker'
self.work2['claimed_worker_start_time'] = work_data.get_integer_time()
self.work_pieces.write_all_to_datastore()
self.assertTrue(
self.work_pieces.update_work_as_completed('this_worker', 'w20'))
self.reset_work_pieces()
self.work_pieces.read_all_from_datastore()
self.assertTrue(self.work_pieces.work['w20']['is_completed'])
self.assertNotIn('error', self.work_pieces.work['w20'])
def test_update_work_as_completed_other_values(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work2['claimed_worker_id'] = 'this_worker'
self.work2['claimed_worker_start_time'] = work_data.get_integer_time()
self.work_pieces.write_all_to_datastore()
self.assertTrue(
self.work_pieces.update_work_as_completed(
'this_worker', 'w20', other_values={'a': 123, 'b': 456}))
self.reset_work_pieces()
self.work_pieces.read_all_from_datastore()
self.assertTrue(self.work_pieces.work['w20']['is_completed'])
self.assertNotIn('error', self.work_pieces.work['w20'])
self.assertEqual(123, self.work_pieces.work['w20']['a'])
self.assertEqual(456, self.work_pieces.work['w20']['b'])
def test_update_work_as_completed_with_error(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work2['claimed_worker_id'] = 'this_worker'
self.work2['claimed_worker_start_time'] = work_data.get_integer_time()
self.work_pieces.write_all_to_datastore()
self.assertTrue(
self.work_pieces.update_work_as_completed(
'this_worker', 'w20', error='err'))
self.reset_work_pieces()
self.work_pieces.read_all_from_datastore()
self.assertTrue(self.work_pieces.work['w20']['is_completed'])
self.assertEqual('err', self.work_pieces.work['w20']['error'])
def test_update_work_as_completed_wrong_claimed_worker(self):
self.reset_work_pieces()
self.work_pieces.work['w10'] = self.work1
self.work_pieces.work['w20'] = self.work2
self.work2['claimed_worker_id'] = 'other_worker'
self.work2['claimed_worker_start_time'] = work_data.get_integer_time()
self.work_pieces.write_all_to_datastore()
self.assertFalse(
self.work_pieces.update_work_as_completed('this_worker', 'w20'))
self.reset_work_pieces()
self.work_pieces.read_all_from_datastore()
self.assertFalse(self.work_pieces.work['w20']['is_completed'])
def test_compute_work_stats(self):
self.reset_work_pieces()
self.work_pieces.work['w11'] = {
'submission_id': 's1',
'output_adversarial_batch_id': 'o1',
'claimed_worker_id': 'worker1',
'claimed_worker_start_time': -1,
'is_completed': True,
'elapsed_time': 1,
}
self.work_pieces.work['w12'] = {
'submission_id': 's1',
'output_adversarial_batch_id': 'o2',
'claimed_worker_id': 'worker2',
'claimed_worker_start_time': -1,
'is_completed': False,
}
self.work_pieces.work['w21'] = {
'submission_id': 's2',
'output_adversarial_batch_id': 'o1',
'claimed_worker_id': 'worker1',
'claimed_worker_start_time': -1,
'is_completed': True,
'elapsed_time': 5,
}
self.work_pieces.work['w22'] = {
'submission_id': 's2',
'output_adversarial_batch_id': 'o2',
'claimed_worker_id': 'worker2',
'claimed_worker_start_time': -1,
'is_completed': True,
'elapsed_time': 10,
'error': 'err',
}
self.work_pieces.work['w23'] = {
'submission_id': 's2',
'output_adversarial_batch_id': 'o1',
'claimed_worker_id': 'worker1',
'claimed_worker_start_time': -1,
'is_completed': True,
'elapsed_time': 7,
}
stats = self.work_pieces.compute_work_statistics()
for v in itervalues(stats):
v['eval_times'] = sorted(v['eval_times'])
self.assertDictEqual(
{
's1': {'completed': 1,
'num_errors': 0,
'error_messages': set(),
'eval_times': [1.0],
'min_eval_time': 1.0,
'max_eval_time': 1.0,
'mean_eval_time': 1.0,
'median_eval_time': 1.0},
's2': {'completed': 3,
'num_errors': 1,
'error_messages': set(['err']),
'eval_times': [5.0, 7.0],
'min_eval_time': 5.0,
'max_eval_time': 7.0,
'mean_eval_time': 6.0,
'median_eval_time': 6.0},
}, stats)
class AttackWorkPiecesTest(unittest.TestCase):
def setUp(self):
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
def test_init_from_adversarial_batches(self):
adv_batches = {
'ADVBATCH000': {'submission_id': 's1'},
'ADVBATCH001': {'submission_id': 's2'},
'ADVBATCH002': {'submission_id': 's3'},
}
expected_values = [
{'claimed_worker_id': None, 'claimed_worker_start_time': None,
'is_completed': False, 'error': None, 'elapsed_time': None,
'submission_id': 's1', 'shard_id': None,
'output_adversarial_batch_id': 'ADVBATCH000'},
{'claimed_worker_id': None, 'claimed_worker_start_time': None,
'is_completed': False, 'error': None, 'elapsed_time': None,
'submission_id': 's2', 'shard_id': None,
'output_adversarial_batch_id': 'ADVBATCH001'},
{'claimed_worker_id': None, 'claimed_worker_start_time': None,
'is_completed': False, 'error': None, 'elapsed_time': None,
'submission_id': 's3', 'shard_id': None,
'output_adversarial_batch_id': 'ADVBATCH002'}
]
attack_work = work_data.AttackWorkPieces(self.datastore_client)
attack_work.init_from_adversarial_batches(adv_batches)
assertCountEqual(self, expected_values, attack_work.work.values())
attack_work.write_all_to_datastore()
attack_work = work_data.AttackWorkPieces(self.datastore_client)
attack_work.read_all_from_datastore()
assertCountEqual(self, expected_values, attack_work.work.values())
class DefenseWorkPiecesTest(unittest.TestCase):
def setUp(self):
self.datastore_client = fake_cloud_client.FakeDatastoreClient()
def test_init_from_classification_batches(self):
class_batches = {
'CBATCH000000': {'submission_id': 's1'},
'CBATCH000001': {'submission_id': 's2'},
'CBATCH000002': {'submission_id': 's3'},
}
expected_values = [
{'claimed_worker_id': None, 'claimed_worker_start_time': None,
'is_completed': False, 'error': None, 'elapsed_time': None,
'submission_id': 's1', 'shard_id': None,
'output_classification_batch_id': 'CBATCH000000'},
{'claimed_worker_id': None, 'claimed_worker_start_time': None,
'is_completed': False, 'error': None, 'elapsed_time': None,
'submission_id': 's2', 'shard_id': None,
'output_classification_batch_id': 'CBATCH000001'},
{'claimed_worker_id': None, 'claimed_worker_start_time': None,
'is_completed': False, 'error': None, 'elapsed_time': None,
'submission_id': 's3', 'shard_id': None,
'output_classification_batch_id': 'CBATCH000002'}
]
defense_work = work_data.DefenseWorkPieces(self.datastore_client)
defense_work.init_from_class_batches(class_batches)
assertCountEqual(self, expected_values, defense_work.work.values())
defense_work.write_all_to_datastore()
defense_work = work_data.DefenseWorkPieces(self.datastore_client)
defense_work.read_all_from_datastore()
assertCountEqual(self, expected_values, defense_work.work.values())
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
################################################################################
# \file build.py
# \author Gregory Diamos <[email protected]>
# \date Sunday March 13, 2011
# \brief The Ocelot build script to direct scons builds and run unit tests
################################################################################
import os
import re
import subprocess
import time
from optparse import OptionParser
import sys
################################################################################
## Build Ocelot
def build(options):
command = "scons -Q"
if options.clean:
command += " -c"
if options.debug:
command += " mode=debug"
if options.no_werr:
command += " Werror=false"
if options.no_wall:
command += " Wall=false"
if options.no_llvm:
command += " enable_llvm=false"
if options.no_opengl:
command += " enable_opengl=false"
if options.no_cuda_runtime:
command += " enable_cuda_runtime=false"
if options.static:
command += " library=static"
if options.build_deb:
if not options.install:
print "Install must be set for a debian build, setting it"
options.install = True
command += " debian"
if options.install:
command += " install=true"
if options.install_prefix:
command += " install_path=" + options.install_prefix
if options.build_target != '':
if options.debug:
command += " .debug_build/"
else:
command += " .release_build/"
command += options.build_target
if options.test_level != 'none':
command += " tests test_level=" + options.test_level
if options.threads > 1:
command += " -j" + str(options.threads)
# Run SCons
print command
    # Flush the above message; on Windows it does not appear until
    # after the following subprocess completes.
sys.stdout.flush()
scons = subprocess.Popen(command, shell=True)
return scons.wait() == 0
################################################################################
################################################################################
## Run Unit Tests
def runUnitTests(options, buildSucceeded):
if not buildSucceeded:
print "Build failed..."
return False
if options.clean:
print "Build cleaned..."
return False
if options.test_level == 'none':
return False
command = "python hydrazine/python/RunRegression.py -v"
if options.debug:
command += " -p .debug_build/"
prefix = "debug"
else:
command += " -p .release_build/"
prefix = "release"
if options.test_level == 'basic':
log = "regression/" + prefix + "-basic.log"
command += " -t regression/basic.level"
elif options.test_level == 'full':
log = "regression/" + prefix + "-full.log"
command += " -t regression/full.level"
elif options.test_level == 'sass':
log = "regression/" + prefix + "-sass.log"
command += " -t regression/sass.level"
else:
print "Unsupported test_level of '" + options.test_level + "'"
return False
command += " -l " + log
print '\nRunning Ocelot Unit Tests...'
print command
status = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout.read()
print status
# Check for any failing/missing tests
if re.search('Failing tests|Non-Existent tests', status):
return False
else:
return True
################################################################################
################################################################################
## Submit to SVN
def submit(options, testPassed):
if not options.submit:
return
if len(options.message) == 0:
print "Log message not specified (use -m)"
return
if not testPassed:
print "Regression tests failed or not run, commit prohibited."
return
command = "svn commit -m \"" + options.message + "\""
os.system(command)
################################################################################
################################################################################
## Main
def main():
parser = OptionParser()
parser.add_option( "-c", "--clean", \
default = False, action = "store_true",
help = "delete all build results except previously installed files" )
parser.add_option( "-d", "--debug", \
default = False, action = "store_true", \
help = "build Ocelot in debug mode." )
parser.add_option( "-t", "--test_level", default = "none", \
help = "set the test level (none, basic, full, sass)" )
parser.add_option( "-j", "--threads", "--jobs", dest="threads",
type="int", default = "1" )
parser.add_option( "-s", "--submit", \
default = False, action = "store_true" )
parser.add_option( "-S", "--static", \
default = False, action = "store_true",
help = "Statically link ocelot." )
parser.add_option( "-i", "--install", \
default = False, action = "store_true", help = "Install ocelot." )
parser.add_option( "-b", "--build_target", \
default = "", help = "build a specific target." )
parser.add_option( "-a", "--no_wall", \
default = False, action = "store_true", help =
"don't display all warnings." )
parser.add_option( "-w", "--no_werr", \
default = False, action = "store_true", help =
"don't turn warnings into errors." )
parser.add_option( "-p", "--install_prefix", \
help = "The base path to install ocelot in." )
parser.add_option( "--build_deb", \
default = False, action = "store_true",
help = "Build a .deb package of Ocelot." )
parser.add_option( "--no_llvm", \
default = False, action = "store_true", help = "Disable llvm support." )
parser.add_option( "--no_opengl", \
default = False, action = "store_true", help = "Disable opengl support." )
parser.add_option( "--no_cuda_runtime", \
default = False, action = "store_true",
help = "Disable exporting cuda runtime symbols." )
parser.add_option( "-m", "--message", default = "", \
help = "the message describing the changes being committed." )
( options, arguments ) = parser.parse_args()
if options.submit:
if options.test_level != 'full':
print "Full test level required for a submit."
options.test_level = 'full'
options.build_target = ''
# Do the build
buildSucceeded = build(options)
# Run unit tests
testsPassed = runUnitTests(options, buildSucceeded)
# Submit if the tests pass
submit(options, testsPassed)
if (buildSucceeded and (options.clean or
(options.test_level == 'none') or testsPassed)):
sys.exit(0)
else:
print "Build failed"
sys.exit(1)
################################################################################
################################################################################
## Guard Main
if __name__ == "__main__":
main()
################################################################################
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from openstackx.api import exceptions as api_exceptions
from mox import IsA
from horizon import api
from horizon import test
SYSPANEL_INDEX_URL = reverse('horizon:syspanel:overview:index')
DASH_INDEX_URL = reverse('horizon:nova:overview:index')
class AuthViewTests(test.BaseViewTests):
def setUp(self):
super(AuthViewTests, self).setUp()
self.setActiveUser()
self.PASSWORD = 'secret'
def test_login_index(self):
res = self.client.get(reverse('horizon:auth_login'))
self.assertTemplateUsed(res, 'splash.html')
def test_login_user_logged_in(self):
self.setActiveUser(self.TEST_TOKEN, self.TEST_USER, self.TEST_TENANT,
False, self.TEST_SERVICE_CATALOG)
res = self.client.get(reverse('horizon:auth_login'))
self.assertRedirectsNoFollow(res, DASH_INDEX_URL)
def test_login_no_tenants(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
class FakeToken(object):
id = TOKEN_ID,
user = {'roles': [{'name': 'fake'}]},
serviceCatalog = {}
aToken = api.Token(FakeToken())
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([])
self.mox.StubOutWithMock(messages, 'error')
messages.error(IsA(http.HttpRequest), IsA(unicode))
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
def test_login(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
class FakeToken(object):
id = TOKEN_ID,
user = {"id": "1",
"roles": [{"id": "1", "name": "fake"}], "name": "user"}
serviceCatalog = {}
tenant = None
aToken = api.Token(FakeToken())
bToken = aToken
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
bToken.tenant = {'id': aTenant.id, 'name': aTenant.name}
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([aTenant])
self.mox.StubOutWithMock(api, 'token_create_scoped')
api.token_create_scoped(IsA(http.HttpRequest), aTenant.id,
aToken.id).AndReturn(bToken)
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertRedirectsNoFollow(res, DASH_INDEX_URL)
def test_login_invalid_credentials(self):
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
unauthorized = api_exceptions.Unauthorized('unauth', message='unauth')
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndRaise(unauthorized)
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
def test_login_exception(self):
form_data = {'method': 'Login',
'password': self.PASSWORD,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
api_exception = api_exceptions.ApiException('apiException',
message='apiException')
api.token_create(IsA(http.HttpRequest), "", self.TEST_USER,
self.PASSWORD).AndRaise(api_exception)
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_login'), form_data)
self.assertTemplateUsed(res, 'splash.html')
def test_switch_tenants_index(self):
res = self.client.get(reverse('horizon:auth_switch',
args=[self.TEST_TENANT]))
self.assertTemplateUsed(res, 'switch_tenants.html')
def test_switch_tenants(self):
NEW_TENANT_ID = '6'
NEW_TENANT_NAME = 'FAKENAME'
TOKEN_ID = 1
self.setActiveUser(self.TEST_USER_ID, self.TEST_TOKEN, self.TEST_USER,
self.TEST_TENANT, False, self.TEST_SERVICE_CATALOG)
form_data = {'method': 'LoginWithTenant',
'password': self.PASSWORD,
'tenant': NEW_TENANT_ID,
'username': self.TEST_USER}
self.mox.StubOutWithMock(api, 'token_create')
aTenant = self.mox.CreateMock(api.Token)
aTenant.id = NEW_TENANT_ID
aTenant.name = NEW_TENANT_NAME
aToken = self.mox.CreateMock(api.Token)
aToken.id = TOKEN_ID
aToken.user = {'id': self.TEST_USER_ID,
'name': self.TEST_USER, 'roles': [{'name': 'fake'}]}
aToken.serviceCatalog = {}
aToken.tenant = {'id': aTenant.id, 'name': aTenant.name}
api.token_create(IsA(http.HttpRequest), NEW_TENANT_ID, self.TEST_USER,
self.PASSWORD).AndReturn(aToken)
self.mox.StubOutWithMock(api, 'tenant_list_for_token')
api.tenant_list_for_token(IsA(http.HttpRequest), aToken.id).\
AndReturn([aTenant])
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:auth_switch',
args=[NEW_TENANT_ID]), form_data)
self.assertRedirectsNoFollow(res, DASH_INDEX_URL)
self.assertEqual(self.client.session['tenant'], NEW_TENANT_NAME)
def test_logout(self):
KEY = 'arbitraryKeyString'
VALUE = 'arbitraryKeyValue'
self.assertNotIn(KEY, self.client.session)
self.client.session[KEY] = VALUE
res = self.client.get(reverse('horizon:auth_logout'))
self.assertRedirectsNoFollow(res, reverse('splash'))
self.assertNotIn(KEY, self.client.session)
|
|
#!/usr/bin/env python
"""CoNLL 2002 shared task format support."""
__author__ = 'Sampo Pyysalo'
__license__ = 'MIT'
import sys
import re
import os
import codecs
import json
INPUT_ENCODING = "Latin-1"
OUTPUT_ENCODING = "UTF-8"
def argparser():
import argparse
parser = argparse.ArgumentParser(description="Convert CoNLL'02 data.")
parser.add_argument('-o', '--output', metavar='DIR', default=None,
help='Output directory.')
parser.add_argument('-f', '--format', choices=['oa', 'ann'], default='oa',
help='Output format.')
parser.add_argument('file', nargs='+', help='Source file(s).')
return parser
class Standoff(object):
def __init__(self, id_, type_, start, end, text):
self.id = id_
self.type = type_
self.start = start
self.end = end
self.text = text
self.validate()
def to_oa(self, docpath):
"""Convert Standoff to Open Annotation."""
# Assume Web Annotation WG context
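        # Illustrative result (hypothetical values):
        #   Standoff(1, 'LOC', 0, 9, 'Melbourne').to_oa('doc-1.txt') ->
        #   {'@id': '1', '@type': 'oa:Annotation',
        #    'target': 'doc-1.txt#char=0,9', 'body': 'LOC'}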
annotation = {
'@id': str(self.id),
'@type': 'oa:Annotation',
'target': docpath + '#char=%d,%d' % (self.start, self.end),
'body': self.type
}
return annotation
def validate(self):
# sanity checks
assert '\n' not in self.text, "ERROR: newline in span '%s'" % \
(self.text)
assert self.text == self.text.strip(), \
"ERROR: span contains extra whitespace: '%s'" % (self.text)
def __unicode__(self):
return "T%d\t%s %d %d\t%s" % \
(self.id, self.type, self.start, self.end, self.text)
def is_quote(s):
return s in ('"', )
def include_space(t1, t2, quote_count = None):
# Helper for reconstructing sentence text. Given the text of two
# consecutive tokens, returns a heuristic estimate of whether a
# space character should be placed between them.
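    # For example (illustrative):
    #   include_space('Hello', 'world')  -> True   (ordinary tokens)
    #   include_space('(', 'word')       -> False  (no space after an opening paren)
    #   include_space('word', ',')       -> False  (no space before punctuation)
    #   include_space('"', 'word', 1)    -> False  (token follows an opening quote)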
if re.match(r'^[\(]$', t1):
return False
if re.match(r'^[.,\)\?\!]$', t2):
return False
if is_quote(t1) and quote_count is not None and quote_count % 2 == 1:
return False
if is_quote(t2) and quote_count is not None and quote_count % 2 == 1:
return False
return True
def output_filenames(dir, infn, docnum, suffix):
outfn = os.path.join(dir, os.path.basename(infn)+'-doc-'+str(docnum))
return outfn+'.txt', outfn+'.'+suffix
def prettyprint(doc):
"""Pretty-print JSON document."""
return json.dumps(doc, sort_keys=True, indent=2, separators=(',', ': '))
def write_ann(textout, annout, text, standoffs):
for so in standoffs:
print >> annout, unicode(so)
print >> textout, text
write_ann.suffix = 'ann'
def write_oa(textout, annout, text, standoffs):
document = {
'@context': 'http://nlplab.org/ns/restoa-context-20150307.json',
'@graph': []
}
for so in standoffs:
document['@graph'].append(so.to_oa(os.path.basename(textout.name)))
print >> annout, prettyprint(document)
print >> textout, text
write_oa.suffix = 'jsonld'
def make_output_function(directory, basename, writer):
"""Return function that invokes the writer with text and standoffs."""
def output(text, standoffs):
if directory is None:
writer(sys.stdout, sys.stdout, text, standoffs)
else:
txtfn, sofn = output_filenames(directory, basename, output.docnum,
writer.suffix)
with codecs.open(txtfn, 'wt', encoding=OUTPUT_ENCODING) as txtf:
with codecs.open(sofn, 'wt', encoding=OUTPUT_ENCODING) as sof:
writer(txtf, sof, text, standoffs)
output.docnum += 1
output.docnum = 1
return output
def text_and_standoffs(sentences):
"""Convert (token, tag, type) sequences into text and Standoffs."""
offset, idnum = 0, 1
doctext = ""
standoffs = []
for si, sentence in enumerate(sentences):
prev_token = None
prev_tag = "O"
curr_start, curr_type = None, None
quote_count = 0
for token, ttag, ttype in sentence:
if curr_type is not None and (ttag != "I" or ttype != curr_type):
# a previously started tagged sequence does not
# continue into this position.
text = doctext[curr_start:offset]
so = Standoff(idnum, curr_type, curr_start, offset, text)
standoffs.append(so)
idnum += 1
curr_start, curr_type = None, None
if (prev_token is not None and
include_space(prev_token, token, quote_count)):
doctext = doctext + ' '
offset += 1
if curr_type is None and ttag != "O":
# a new tagged sequence begins here
curr_start, curr_type = offset, ttype
doctext = doctext + token
offset += len(token)
if is_quote(token):
quote_count += 1
prev_token = token
prev_tag = ttag
# leftovers?
if curr_type is not None:
text = doctext[curr_start:offset]
so = Standoff(idnum, curr_type, curr_start, offset, text)
standoffs.append(so)
idnum += 1
if si+1 != len(sentences):
doctext = doctext + '\n'
offset += 1
return doctext, standoffs
def lookahead(iterable, distance=1):
"""Yield tuples of current item and next items from iterable."""
# modified from https://github.com/maaku/lookahead/
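    # For example (illustrative):
    #   list(lookahead([1, 2, 3]))  -> [(1, 2), (2, 3), (3, None)]
    #   list(lookahead('ab', 2))    -> [('a', 'b', None), ('b', None, None)]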
iterator = iter(iterable)
# Fill initial
items = [iterator.next()]
for i in range(distance):
try:
items.append(iterator.next())
except StopIteration:
items.append(None)
distance -= 1
# Main loop
for i in iterator:
yield tuple(items)
items = items[1:] + [i]
# Pad with None
for i in range(distance+1):
yield tuple(items)
items = items[1:] + [None]
raise StopIteration
def is_sentence_break(line):
# blank lines separate sentences
return re.match(r'^\s*$', line)
def is_document_break(line):
# special character sequence separating documents
return re.match(r'^===*\s+O\s*$', line) or re.match(r'^-DOCSTART-', line)
def is_post_document_break(line, next_line, next_next_line):
# Heuristic match for likely doc break before current sentence.
# Note: this doesn't add a break at the current sentence, but
# before it. (See e.g. line 278 in esp.train)
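    # e.g. (illustrative) returns True when next_line is blank and
    # next_next_line looks like '---------- O'.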
return (next_next_line is not None and
re.match(r'^\s*$', next_line) and
re.match(r'^-+\s+O\s*$', next_next_line))
def parse_token_line(line):
    # The format for Spanish is word and BIO tag separated by space, and
    # for Dutch it is word, POS and BIO tag separated by space. Try both.
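    # e.g. (illustrative) Spanish: 'Melbourne B-LOC' -> ('Melbourne', 'B-LOC');
    #      Dutch: 'De Art O' -> ('De', 'O').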
m = re.match(r'^(\S+)\s(\S+)$', line)
if not m:
m = re.match(r'^(\S+)\s\S+\s(\S+)$', line)
assert m, "Error parsing line: %s" % line
return m.groups()
def parse_tag(tag):
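    # e.g. (illustrative) parse_tag('B-PER') -> ('B', 'PER'); parse_tag('O') -> ('O', '')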
m = re.match(r'^([BIO])((?:-[A-Za-z_]+)?)$', tag)
assert m, "ERROR: failed to parse tag '%s' in %s" % (tag, fn)
ttag, ttype = m.groups()
if len(ttype) > 0 and ttype[0] == "-":
ttype = ttype[1:]
return ttag, ttype
def _parse_conll(source):
    # Implementation for parse_conll(); don't invoke directly.
# Store (token, BIO-tag, type) triples for sentence
sentences = []
current = []
# We need lookahead for the document break heuristic.
for ln, next_three_lines in enumerate(lookahead(source, 2)):
line, l2, l3 = next_three_lines
line = line.strip()
if is_sentence_break(line):
sentences.append(current)
current = []
continue
if is_document_break(line):
yield sentences
sentences = []
continue
if is_post_document_break(line, l2, l3):
yield sentences
sentences = []
# Go on to process current token normally
# Normal line.
token, tag = parse_token_line(line)
ttag, ttype = parse_tag(tag)
current.append((token, ttag, ttype))
# Process leftovers, if any
sentences.append(current)
yield sentences
def parse_conll(source):
"""Parse CoNLL 2002 data, yield documents in (token, tag, type) format."""
for sentences in _parse_conll(source):
# Filter out empty sentences and documents, yield nonempties.
sentences = [s for s in sentences if len(s) > 0]
if len(sentences) > 0:
yield sentences
def convert_conll(source, callback):
"""Convert CoNLL 2002 data, invoke callback with text and standoffs."""
for sentences in parse_conll(source):
text, standoffs = text_and_standoffs(sentences)
callback(text, standoffs)
def select_writer(args):
if args.format == 'oa':
return write_oa
elif args.format == 'ann':
return write_ann
else:
assert False, 'internal error'
def main(argv):
# Take an optional "-o" arg specifying an output directory for the results
args = argparser().parse_args(argv[1:])
writer = select_writer(args)
for fn in args.file:
output = make_output_function(args.output, fn, writer)
with codecs.open(fn, encoding=INPUT_ENCODING) as f:
convert_conll(f, output)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
r'''
In practice, :class:`~pyop.linop.LinearOperator` instances composed of distinct
sub-blocks are common. In general, access to both the constituent operators and
larger operators is desired. The :mod:`~pyop.block` module provides several
functions to help build block instances of :class:`~pyop.linop.LinearOperator`
from simpler components.
:func:`~pyop.block.hstack` and :func:`~pyop.block.vstack` are the workhorses
of :mod:`~pyop.block` and are analogous to their numpy counterparts. These
functions can be used alone to squash a row or column of
:class:`~pyop.linop.LinearOperator` instances, respectively, into a single
:class:`~pyop.linop.LinearOperator` instance. These functions expect a
list of :class:`~pyop.linop.LinearOperator` instances as input.
:func:`~pyop.block.bmat` is the generic block-builder, and takes in a
list of lists of :class:`~pyop.linop.LinearOperator` instances and returns
a single block operator. :func:`~pyop.block.bmat` assumes the list of
lists is in row-major order.
:func:`~pyop.block.blockDiag` allows for easy creation of common block
diagonal operators, given a list of the diagonal component operators.
.. math::
E = \begin{bmatrix} A & B \\ C & D \end{bmatrix}
We can easily build :math:`E` or just its rows and columns using
:mod:`~pyop.block` functions. ::
A = LinearOperator((10, 12), forward, adjoint)
B = LinearOperator((10, 30), forward, adjoint)
C = LinearOperator((15, 12), forward, adjoint)
D = LinearOperator((15, 30), forward, adjoint)
row1 = hstack([A, B])
row2 = hstack([C, D])
col1 = vstack([A, C])
col2 = vstack([B, D])
E1 = vstack([row1, row2])
E2 = hstack([col1, col2])
E3 = bmat([[A, B], [C, D]])
In this example, `E1`, `E2`, and `E3` are equivalent.
We can also easily make block diagonal operators with
:func:`~pyop.block.blockDiag`:
.. math::
D = \begin{bmatrix} A & \mathbf{0} & \mathbf{0} \\ \mathbf{0} & B &
\mathbf{0} \\ \mathbf{0} & \mathbf{0} & C \end{bmatrix}
::
A = LinearOperator(op_shape1, forward1, adjoint1)
B = LinearOperator(op_shape2, forward2, adjoint2)
C = LinearOperator(op_shape3, forward3, adjoint3)
D = blockDiag([A, B, C])
'''
from numpy import vsplit, vstack, tile, concatenate, cumsum, add
from numpy import vstack as npvstack
from pyop import LinearOperator, matmat
from scipy.misc import doccer
import six
docdict = {
'blocks' :
'''blocks : [LinearOperator]
A list of LinearOperator objects.''',
'LinearOperator' :
'''LinearOperator
The new block operator.''',
## The see also section.
'blockDiag' : '''blockDiag : Construct a LinearOperator from block
diagonal components.''',
'bmat' : '''bmat : Construct a LinearOperator from LinearOperator
subcomponents.''',
'hstack' : '''hstack : Squash a row of LinearOperators to a single block
LinearOperator.''',
'vstack' : '''vstack : Squash a column of LinearOperators to a single
block LinearOperator.'''
}
docfill = doccer.filldoc(docdict)
@docfill
def bmat(blocks):
''' Converts a list of lists into a new operator.
The new operator is composed of blocks described by the list.
Parameters
----------
blocks : [[LinearOperator]]
A list of lists, with each base component a linear operator (objects
instantiated from the LinearOperator class).
Returns
-------
%(LinearOperator)s
See Also
--------
%(blockDiag)s
%(hstack)s
%(vstack)s
Examples
--------
>>> from pyop import toLinearOperator, toMatrix
>>> from pyop.block import bmat
>>> from numpy import array
>>> A = toLinearOperator(array([[1., 2.], [0., 4.]]))
>>> B = toLinearOperator(array([[3., 0.], [5., 0.]]))
>>> C = toLinearOperator(array([[6., 0., 0., 7], [0., 8., 9., 0.]]))
>>> blocks = [[A, B], [C]]
>>> D = bmat(blocks)
>>> toMatrix(D)
array([[ 1., 2., 3., 0.],
[ 0., 4., 5., 0.],
[ 6., 0., 0., 7.],
[ 0., 8., 9., 0.]])
'''
if len(blocks) == 0:
raise ValueError('Empty list supplied to block operator.')
## First collapse the rows using horz_cat.
vert_block_op = [hstack(row) for row in blocks]
## Next collapse the column into one block operator using vert_cat.
block_op = vstack(vert_block_op)
return block_op
@docfill
def blockDiag(blocks):
''' Converts a list of operators into a new operator.
The new operator is composed of diagonal blocks described by the list.
Parameters
----------
%(blocks)s
Returns
-------
%(LinearOperator)s
See Also
--------
%(bmat)s
Examples
--------
>>> from pyop.block import blockDiag
>>> from pyop import toLinearOperator, toMatrix
>>> from numpy import array
>>> A = toLinearOperator(array([[1., 2.], [3., 4.]]))
>>> B = toLinearOperator(array([[5., 6.], [7., 8.]]))
>>> C = blockDiag([A, B])
>>> toMatrix(C)
array([[ 1., 2., 0., 0.],
[ 3., 4., 0., 0.],
[ 0., 0., 5., 6.],
[ 0., 0., 7., 8.]])
'''
if len(blocks) == 0:
raise ValueError('Empty list supplied to diagonal block operator.')
rows = sum(b.shape[0] for b in blocks)
cols = sum(b.shape[1] for b in blocks)
## Generate a list containing the indices to split the vector x
## to be sent to each component of the block operator.
forward_splitting_idx = cumsum([b.shape[1] for b in blocks])
adjoint_splitting_idx = cumsum([b.shape[0] for b in blocks])
@matmat
def forwardFunction(x):
## Split vector subcomponents on the block operator lengths.
## TODO: Rename for general matrix inputs.
vec_components = vsplit(x, forward_splitting_idx)
## Apply each operator to corresponding subvector and concatenate
## the results.
sub_outvecs = (b(v) for (b, v) in six.moves.zip(blocks,
vec_components))
## Concatenate the output sub-vectors together.
return npvstack(sub_outvecs)
@matmat
def adjointFunction(x):
## Split vector subcomponents on the block operator lengths.
## TODO: Rename for general matrix inputs.
vec_components = vsplit(x, adjoint_splitting_idx)
## Apply each operator to corresponding subvector and concatenate
## the results.
sub_outvecs = (b.T(v) for (b, v) in six.moves.zip(blocks,
vec_components))
## Concatenate the output sub-vectors together.
return npvstack(sub_outvecs)
return LinearOperator((rows, cols),
forwardFunction,
adjointFunction)
def __horzcat(horz_blocks):
''' Converts list of horizontal operators into one linear operator.'''
## Generate a list containing the indices to split the vector x
## to be sent to each component of the block operator.
splitting_idx = cumsum([b.shape[1] for b in horz_blocks])
@matmat
def opFunction(x):
## Split vector subcomponents based on the block operator lengths.
## TODO: Rename all of these to imply matrix not vector
vec_components = vsplit(x, splitting_idx)
## Apply each operator to its corresponding subvector and add the
## results. Two cases for forward and adjoint functions.
sub_outvecs = (b(v) for (b, v) in six.moves.zip(horz_blocks,
vec_components))
## Add the vectors together.
return sum(sub_outvecs)
return opFunction
def __vertcat(vert_blocks):
''' Converts list of vertical operators into one operator.'''
@matmat
def opFunction(x):
## Apply each operator (forward or adjoint) to the input vector to
## get output vector sub-components.
sub_outvecs = (b(x) for b in vert_blocks)
## Concatenate the output sub-vectors together.
return npvstack(sub_outvecs)
return opFunction
@docfill
def hstack(blocks):
''' Converts list of operators into one operator.
The new operator is created assuming the list corresponds to a row of
blocks in a larger block matrix-like operator.
Parameters
----------
%(blocks)s
Returns
-------
%(LinearOperator)s
See Also
--------
%(vstack)s
%(bmat)s
Examples
--------
>>> from pyop.block import hstack
>>> from pyop import toLinearOperator, toMatrix
>>> from numpy import array
>>> A = toLinearOperator(array([[1., 2.], [4., 5.]]))
>>> B = toLinearOperator(array([[3.], [6.]]))
>>> C = hstack([A, B])
>>> toMatrix(C)
array([[ 1., 2., 3.],
[ 4., 5., 6.]])
'''
if len(blocks) == 0:
raise ValueError('Horizontal concatenation of empty list.')
rows = blocks[0].shape[0]
cols = sum(h.shape[1] for h in blocks)
if not all(b.shape[0] == rows for b in blocks):
raise ValueError('Block operator horizontal concatenation failed: '
'row mismatch.')
return LinearOperator((rows, cols),
__horzcat(blocks),
__vertcat([h.T for h in blocks]))
@docfill
def vstack(blocks):
''' Converts list of operators into one operator.
The new operator is created assuming the list corresponds to a column of
blocks in a larger block matrix-like operator.
Parameters
----------
%(blocks)s
Returns
-------
%(LinearOperator)s
See Also
--------
%(hstack)s
%(bmat)s
Examples
--------
>>> from pyop.block import vstack
>>> from pyop import toLinearOperator, toMatrix
>>> from numpy import array
>>> A = toLinearOperator(array([[1., 4.], [2., 5.]]))
>>> B = toLinearOperator(array([[3., 6.]]))
>>> C = vstack([A, B])
>>> toMatrix(C)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]])
'''
if len(blocks) == 0:
raise ValueError('Vertical concatenation of empty list.')
rows = sum(v.shape[0] for v in blocks)
cols = blocks[0].shape[1]
if not all(b.shape[1] == cols for b in blocks):
raise ValueError('Block operator vertical concatenation failed: '
'column mismatch.')
return LinearOperator((rows, cols),
__vertcat(blocks),
__horzcat([v.T for v in blocks]))
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions for managing the installer."""
import logging
import os
import time
# do not remove: internal placeholder 1
from glazier.chooser import chooser
from glazier.lib import log_copy
from glazier.lib import registry
from glazier.lib import stage
from glazier.lib.actions import file_system
from glazier.lib.actions.base import ActionError
from glazier.lib.actions.base import BaseAction
from glazier.lib.actions.base import RestartEvent
from glazier.lib.actions.base import ServerChangeEvent
from glazier.lib.actions.base import ValidationError
import yaml
from glazier.lib import constants
class AddChoice(BaseAction):
"""Add a pending question for display in the UI."""
def _Setup(self):
self._realtime = True
def Run(self):
self._build_info.AddChooserOption(self._args)
def Validate(self):
choice = self._args
self._TypeValidator(choice, dict)
for f in ['name', 'type', 'prompt', 'options']:
if f not in choice:
raise ValidationError('Missing required field %s: %s' % (f, choice))
for f in ['name', 'type', 'prompt']:
self._TypeValidator(choice[f], str)
self._TypeValidator(choice['options'], list)
for opt in choice['options']:
self._TypeValidator(opt, dict)
if 'label' not in opt:
raise ValidationError('Missing required field %s: %s' % ('label', opt))
self._TypeValidator(opt['label'], str)
if 'value' not in opt:
raise ValidationError('Missing required field %s: %s' % ('value', opt))
self._TypeValidator(opt['value'], (bool, str))
if 'tip' in opt:
self._TypeValidator(opt['tip'], str)
if 'default' in opt:
self._TypeValidator(opt['default'], bool)
class BuildInfoDump(BaseAction):
"""Dump build information to disk."""
def Run(self):
path = os.path.join(constants.SYS_CACHE, 'build_info.yaml')
logging.debug('Dumping build information to file: %s', path)
self._build_info.Serialize(path)
class BuildInfoSave(BaseAction):
"""Save build information to the registry."""
def _WriteRegistry(self, reg_values):
"""Populates the registry with build_info settings for future reference.
Args:
reg_values: A dictionary of key/value pairs to be added to the registry.
"""
for value_name in reg_values:
key_path = constants.REG_ROOT
value_data = reg_values[value_name]
if 'TIMER_' in value_name:
key_path = r'{0}\{1}'.format(constants.REG_ROOT, 'Timers')
try:
registry.set_value(value_name, value_data, 'HKLM', key_path)
except registry.Error as e:
raise ActionError(e) from e
def Run(self):
path = os.path.join(constants.SYS_CACHE, 'build_info.yaml')
if os.path.exists(path):
with open(path) as handle:
input_config = yaml.safe_load(handle)
self._WriteRegistry(input_config['BUILD'])
os.remove(path)
else:
logging.debug('%s does not exist - skipped processing.', path)
class ChangeServer(BaseAction):
"""Move to a different Glazier server."""
def _Setup(self):
self._realtime = True
def Run(self):
self._build_info.ConfigServer(set_to=self._args[0])
self._build_info.ActiveConfigPath(set_to=self._args[1])
raise ServerChangeEvent('Action triggering server change.')
def Validate(self):
self._ListOfStringsValidator(self._args, 2)
class ExitWinPE(BaseAction):
"""Exit the WinPE environment to start host configuration."""
def Run(self):
cp = file_system.CopyFile(
[constants.WINPE_TASK_LIST, constants.SYS_TASK_LIST], self._build_info)
cp.Run()
cp = file_system.CopyFile(
[constants.WINPE_BUILD_LOG, constants.SYS_BUILD_LOG], self._build_info)
cp.Run()
raise RestartEvent(
'Leaving WinPE', timeout=10, task_list_path=constants.SYS_TASK_LIST)
class LogCopy(BaseAction):
"""Upload build logs for collection."""
def Run(self):
file_name = str(self._args[0])
share = None
if len(self._args) > 1:
share = str(self._args[1])
logging.debug('Found log copy event for file %s to %s.', file_name, share)
copier = log_copy.LogCopy()
# EventLog
try:
copier.EventLogCopy(file_name)
except log_copy.LogCopyError as e:
logging.warning('Unable to complete log copy to EventLog. %s', e)
# CIFS
if share:
try:
copier.ShareCopy(file_name, share)
except log_copy.LogCopyError as e:
logging.warning('Unable to complete log copy via CIFS. %s', e)
def Validate(self):
self._ListOfStringsValidator(self._args, 1, 2)
class ShowChooser(BaseAction):
"""Show the Chooser UI."""
def Run(self):
ui = chooser.Chooser(options=self._build_info.GetChooserOptions())
ui.Display()
responses = ui.Responses()
self._build_info.StoreChooserResponses(responses)
self._build_info.FlushChooserOptions()
def _Setup(self):
self._realtime = True
class Sleep(BaseAction):
"""Pause the installer."""
def Run(self):
duration = int(self._args[0])
converted_time = time.strftime('%H:%M:%S', time.gmtime(duration))
if len(self._args) > 1:
logging.info('Sleeping for %s (%s).', converted_time, str(self._args[1]))
else:
logging.info('Sleeping for %s before continuing...', converted_time)
time.sleep(duration)
def Validate(self):
self._TypeValidator(self._args, list)
if len(self._args) > 2:
raise ValidationError('Invalid args length: %s' % self._args)
self._TypeValidator(self._args[0], int)
if len(self._args) > 1:
self._TypeValidator(self._args[1], str)
class StartStage(BaseAction):
"""Start a new stage of the installation."""
def Run(self):
try:
stage.set_stage(int(self._args[0]))
# Terminal stages exit immediately; the build should be complete.
if len(self._args) > 1 and self._args[1]:
stage.exit_stage(int(self._args[0]))
except stage.Error as e:
raise ActionError(e) from e
def Validate(self):
self._TypeValidator(self._args, list)
if len(self._args) > 2:
raise ValidationError('Invalid args length: %s' % self._args)
self._TypeValidator(self._args[0], int)
if len(self._args) > 1:
self._TypeValidator(self._args[1], bool)
|
|
from __future__ import absolute_import
from copy import deepcopy
from datetime import datetime
from django.core.exceptions import MultipleObjectsReturned, FieldError
from django.test import TestCase
from django.utils.translation import ugettext_lazy
from .models import Article, Reporter
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]')
self.r2.save()
# Create an Article.
self.a = Article(id=None, headline="This is a test",
pub_date=datetime(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
# These are strings instead of unicode strings because that's what was used in
# the creation of this reporter (and we haven't refreshed the data from the
# database, which always returns unicode strings).
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(id=None, headline="Third article",
pub_date=datetime(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(id=None, headline="Fourth article",
pub_date=datetime(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime(2006, 1, 17))
self.r.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with self.assertRaisesRegexp(TypeError, "'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set descriptor.
self.r2.article_set = [new_article, new_article2]
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Funny case - assignment notation can only go so far; because the
# ForeignKey cannot be null, existing members of the set must remain.
self.r.article_set = [new_article]
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Check that implied __exact also works
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John'
).extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith']),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id,self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r,self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(id=None, headline="Third article",
pub_date=datetime(2005, 7, 27), reporter_id=self.r.id)
a4 = Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime(2005, 7, 27), reporter_id=str(self.r.id))
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
["<Reporter: John Smith>"])
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id,a3.id]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id,a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a,a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct(),
["<Reporter: John Smith>"])
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
])
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
["<Reporter: John Smith>"])
# Check that implied __exact also works.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter=self.r).distinct(),
["<Reporter: John Smith>"])
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
self.assertEqual([d],
list(Article.objects.filter(reporter=self.r).distinct().order_by()
.values('reporter__first_name', 'reporter__last_name')))
def test_select_related(self):
# Check that Article.objects.select_related().dates() works properly when
# there are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]')
a1 = Article.objects.create(headline='First', pub_date=datetime(1980, 4, 23), reporter=r1)
a2 = Article.objects.create(headline='Second', pub_date=datetime(1980, 4, 23), reporter=r2)
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
[
datetime(1980, 4, 23, 0, 0),
datetime(2005, 7, 27, 0, 0),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
[
datetime(1980, 4, 1, 0, 0),
datetime(2005, 7, 1, 0, 0),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
[
datetime(1980, 1, 1, 0, 0),
datetime(2005, 1, 1, 0, 0),
])
def test_delete(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime(2006, 1, 17))
a3 = Article.objects.create(id=None, headline="Third article",
pub_date=datetime(2005, 7, 27), reporter_id=self.r.id)
a4 = Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime(2005, 7, 27), reporter_id=str(self.r.id))
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
[
"<Reporter: John Smith>",
"<Reporter: Paul Jones>",
])
self.r2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_regression_12876(self):
# Regression for #12876 -- Model methods that include queries that
# recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(id=None,
headline="John's second test",
pub_date=datetime(2011, 5, 7),
reporter_id=self.r.id)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
[
"<Article: John's second test>",
"<Article: This is a test>",
])
# Create an Article by Paul for the same date.
a3 = Article.objects.create(id=None, headline="Paul's commentary",
pub_date=datetime(2011, 5, 7),
reporter_id=self.r2.id)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
self.assertRaises(MultipleObjectsReturned,
Article.objects.get, reporter_id=self.r.id)
self.assertEqual(repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id,
pub_date=datetime(2011, 5, 7))))
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
# Same twice
self.assertTrue(r1.article_set.__class__ is r1.article_set.__class__)
# Same as each other
self.assertTrue(r1.article_set.__class__ is r2.article_set.__class__)
def test_create_relation_with_ugettext_lazy(self):
reporter = Reporter.objects.create(first_name='John',
last_name='Smith',
email='[email protected]')
lazy = ugettext_lazy('test')
reporter.article_set.create(headline=lazy,
pub_date=datetime(2011, 6, 10))
notlazy = unicode(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
self.assertRaisesMessage(FieldError,
expected_message % ', '.join(Reporter._meta.get_all_field_names()),
Article.objects.values_list,
'reporter__notafield')
self.assertRaisesMessage(FieldError,
expected_message % ', '.join(['EXTRA',] + Article._meta.get_all_field_names()),
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
'notafield')
'''
***
Modified generic daemon class
***
Author: http://www.jejik.com/articles/2007/02/
a_simple_unix_linux_daemon_in_python/www.boxedice.com
License: http://creativecommons.org/licenses/by-sa/3.0/
Changes: 23rd Jan 2009 (David Mytton <[email protected]>)
- Replaced hard coded '/dev/null' in __init__ with os.devnull
- Added OS check to conditionally remove code that doesn't
work on OS X
- Added output to console on completion
- Tidied up formatting
11th Mar 2009 (David Mytton <[email protected]>)
- Fixed problem with daemon exiting on Python 2.4
(before SystemExit was part of the Exception base)
13th Aug 2010 (David Mytton <[email protected]>)
- Fixed unhandled exception if PID file is empty
'''
# Core modules
import atexit
import os
import sys
import time
import signal
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method.
(A minimal usage sketch follows the class definition.)
"""
def __init__(self, pidfile, stdin=os.devnull,
stdout=os.devnull, stderr=os.devnull,
home_dir='.', umask=0o22, verbose=1):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.home_dir = home_dir
self.verbose = verbose
self.umask = umask
self.daemon_alive = True
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write(
"fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write(
"fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
if self.stderr:
se = open(self.stderr, 'ab+', 0)
else:
se = so
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
if self.verbose >= 1:
print("Started")
# Write pidfile
atexit.register(
self.delpid) # Make sure pid file is removed if we quit
pid = str(os.getpid())
open(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args, **kwargs):
"""
Start the daemon
"""
if self.verbose >= 1:
print("Starting...")
# Check for a pidfile to see if the daemon already runs
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
if pid:
message = "pidfile %s already exists. Is it already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args, **kwargs)
def stop(self):
"""
Stop the daemon
"""
if self.verbose >= 1:
print("Stopping...")
# Get the pid from the pidfile
pid = self.get_pid()
if not pid:
message = "pidfile %s does not exist. Not running?\n"
sys.stderr.write(message % self.pidfile)
# Just to be sure. A ValueError might occur if the PID file is
# empty but does actually exist
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return # Not an error in a restart
# Try killing the daemon process
try:
i = 0
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
i = i + 1
if i % 10 == 0:
os.kill(pid, signal.SIGHUP)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print(str(err))
sys.exit(1)
if self.verbose >= 1:
print("Stopped")
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def get_pid(self):
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
return pid
def is_running(self):
pid = self.get_pid()
print(pid)
return pid and os.path.exists('/proc/%d' % pid)
def run(self):
"""
You should override this method when you subclass Daemon.
It will be called after the process has been
daemonized by start() or restart().
"""
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import traceback
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import context as cinder_context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder.image import image_utils
from cinder import objects
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
If rescheduling doesn't occur this task errors out the volume.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, scheduler_rpcapi,
do_reschedule):
requires = ['filter_properties', 'request_spec', 'volume_id',
'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.do_reschedule = do_reschedule
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database; that
# cannot be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
]
def execute(self, **kwargs):
pass
def _pre_reschedule(self, context, volume_id):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Update volume's timestamp and host.
#
# NOTE(harlowja): it is awkward to do this here; shouldn't
# this happen at the scheduler itself rather than before it gets
# sent to the scheduler? (What happens if it never gets
# there??) It's almost like we need a status of 'on-the-way-to-
# scheduler' in the future.
# We don't need to update the volume's status to creating, since
# we haven't changed it to error.
update = {
'scheduled_at': timeutils.utcnow(),
'host': None,
}
LOG.debug("Updating volume %(volume_id)s with %(update)s.",
{'update': update, 'volume_id': volume_id})
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let updating the state cause the rescheduling to fail.
LOG.exception(_LE("Volume %s: update volume state failed."),
volume_id)
def _reschedule(self, context, cause, request_spec, filter_properties,
volume_id):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume_id
LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'volume_id': volume_id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, CONF.volume_topic, volume_id,
request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, volume_id):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug("Volume %s: re-scheduled", volume_id)
def revert(self, context, result, flow_failures, volume_id, **kwargs):
# NOTE(dulek): A revert is occurring and the manager needs to know
# whether rescheduling happened. We return a boolean flag indicating
# that; it will be available in the flow engine's store through the
# get_revert_result method.
# If we do not want to reschedule, just set the volume's status to
# error and return.
if not self.do_reschedule:
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
return False
# Check if we have a cause which can tell us not to reschedule and
# set the volume's status to error.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
common.error_out_volume(context, self.db, volume_id)
LOG.error(_LE("Volume %s: create failed"), volume_id)
return False
# Use a different context when rescheduling.
if self.reschedule_context:
cause = list(flow_failures.values())[0]
context = self.reschedule_context
try:
self._pre_reschedule(context, volume_id)
self._reschedule(context, cause, volume_id=volume_id, **kwargs)
self._post_reschedule(volume_id)
return True
except exception.CinderException:
LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
return False
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'volume_ref'
def __init__(self, db, host, set_error=True):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
self.set_error = set_error
def execute(self, context, volume_id):
# NOTE(harlowja): this will fetch the volume from the database; if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
# the volume cannot be deleted while it is still being created?
volume_ref = self.db.volume_get(context, volume_id)
return volume_ref
def revert(self, context, volume_id, result, **kwargs):
if isinstance(result, ft.Failure) or not self.set_error:
return
reason = _('Volume create failed while extracting volume ref.')
common.error_out_volume(context, self.db, volume_id, reason=reason)
LOG.error(_LE("Volume %s: create failed"), volume_id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference, which can be used by
other tasks to look up further details about the volume to be created.
(An illustrative example of the resulting volume_spec follows this class.)
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['volume_ref', 'request_spec']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume_ref, request_spec):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume_ref['name']
volume_size = utils.as_int(volume_ref['size'], quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume_ref['status'],
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image]
'volume_id': volume_ref['id'],
'volume_name': volume_name,
'volume_size': volume_size,
}
if volume_ref.get('snapshot_id'):
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': volume_ref['snapshot_id'],
})
elif volume_ref.get('source_volid'):
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = volume_ref.get('source_volid')
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_volid': source_volid,
# This is captured in case we have to revert and we want to set
# the source volume status back to its original status. This
# may or may not be sketchy to do??
'source_volstatus': source_volume_ref['status'],
'type': 'source_vol',
})
elif request_spec.get('source_replicaid'):
# We are making a clone based on the replica.
#
# NOTE(harlowja): This will likely fail if the replica
# disappeared by the time this call occurred.
source_volid = request_spec['source_replicaid']
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_replicaid': source_volid,
'source_replicastatus': source_volume_ref['status'],
'type': 'source_replica',
})
elif request_spec.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = request_spec['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of re-fetching the image service later, just save it.
#
# NOTE(harlowja): if we later have to recover this task's output
# on another 'node', this object won't be serializable, so we
# will have to recreate it on demand in the future.
'image_service': image_service,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, ft.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
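# Illustrative example (editor's addition): for an image-backed request the
# volume_spec produced by ExtractVolumeSpecTask.execute() above has roughly
# this shape; all concrete values here are hypothetical, not taken from a
# real deployment.
#
#     {
#         'status': 'creating',
#         'type': 'image',
#         'volume_id': '1b1b1b1b-...',
#         'volume_name': 'volume-1b1b1b1b',
#         'volume_size': 10,
#         'image_id': '2c2c2c2c-...',
#         'image_location': <result of image_service.get_location(...)>,
#         'image_meta': <result of image_service.show(...)>,
#         'image_service': <Glance image service instance>,
#     }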
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume_ref):
volume_id = volume_ref['id']
try:
volume_utils.notify_about_volume_usage(context, volume_ref,
self.event_suffix,
host=volume_ref['host'])
except exception.CinderException:
# If sending the notification or reading the volume database entry
# fails, we shouldn't error out the whole workflow, since this is
# not information that must be sent for volumes to operate.
LOG.exception(_LE("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s"),
{'event': self.event_suffix, 'volume_id': volume_id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume'
def __init__(self, manager, db, driver, image_volume_cache=None):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.manager = manager
self.db = db
self.driver = driver
self.image_volume_cache = image_volume_cache
def _handle_bootable_volume_glance_meta(self, context, volume_id,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
Caller should provide one and only one of snapshot_id, source_volid
and image_id. If an image_id is specified, an image_meta should also
be provided; otherwise it will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
self._enable_bootable_flag(context, volume_id)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume_id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume_id)
elif kwargs.get('source_replicaid'):
src_type = 'source replica'
src_id = kwargs['source_replicaid']
source_replicaid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_replicaid,
volume_id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self._capture_volume_image_metadata(context, volume_id,
image_id, image_meta)
except exception.GlanceMetadataNotFound:
# If the volume was not created from an image, no glance metadata
# will be available for that volume in the
# volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.exception(exception_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume_ref, snapshot_id,
**kwargs):
volume_id = volume_ref['id']
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
model_update = self.driver.create_volume_from_snapshot(volume_ref,
snapshot)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = self.db.volume_get(context,
snapshot.volume_id)
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
"bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference"),
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot.volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume_id,
snapshot_id=snapshot_id)
return model_update
def _enable_bootable_flag(self, context, volume_id):
try:
LOG.debug('Marking volume %s as bootable.', volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating volume %(volume_id)s bootable "
"flag to true"), {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref,
source_volid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_volid)
model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
source_volid=source_volid)
return model_update
def _create_from_source_replica(self, context, volume_ref,
source_replicaid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_replicaid)
model_update = self.driver.create_replica_test_volume(volume_ref,
srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(
context,
volume_ref['id'],
source_replicaid=source_replicaid)
return model_update
def _copy_image_to_volume(self, context, volume_ref,
image_id, image_location, image_service):
"""Downloads Glance image to the specified volume."""
copy_image_to_volume = self.driver.copy_image_to_volume
volume_id = volume_ref['id']
LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s.",
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
try:
copy_image_to_volume(context, volume_ref, image_service, image_id)
except processutils.ProcessExecutionError as ex:
LOG.exception(_LE("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s"),
{'volume_id': volume_id, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"),
{'volume_id': volume_id})
raise exception.ImageUnacceptable(ex)
except Exception as ex:
LOG.exception(_LE("Failed to copy image %(image_id)s to "
"volume: %(volume_id)s"),
{'volume_id': volume_id, 'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
raise exception.ImageCopyFailure(reason=ex)
else:
raise
LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully.",
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
# Save some base attributes into the volume metadata
base_metadata = {
'image_id': image_id,
}
name = image_meta.get('name', None)
if name:
base_metadata['image_name'] = name
# Save some more attributes into the volume metadata from the image
# metadata
for key in IMAGE_ATTRIBUTES:
if key not in image_meta:
continue
value = image_meta.get(key, None)
if value is not None:
base_metadata[key] = value
# Save all the image metadata properties into the volume metadata
property_metadata = {}
image_properties = image_meta.get('properties', {})
for (key, value) in image_properties.items():
if value is not None:
property_metadata[key] = value
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.",
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
self.db.volume_glance_metadata_bulk_create(context, volume_id,
volume_metadata)
def _clone_image_volume(self, context, volume, image_location, image_meta):
"""Create a volume efficiently from an existing image.
Returns a 2-tuple: a dict of volume properties (e.g. provider_location)
and a boolean indicating whether cloning occurred.
"""
if not image_location:
return None, False
if (image_meta.get('container_format') != 'bare' or
image_meta.get('disk_format') != 'raw'):
LOG.info(_LI("Requested image %(id)s is not in raw format."),
{'id': image_meta.get('id')})
return None, False
image_volume = None
direct_url, locations = image_location
urls = set([direct_url] + [loc.get('url') for loc in locations or []])
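# Glance 'cinder://<volume-id>' direct URLs embed the backing volume id
# right after the 9-character scheme prefix, hence the url[9:] slice below.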
image_volume_ids = [url[9:] for url in urls
if url and url.startswith('cinder://')]
image_volumes = self.db.volume_get_all_by_host(
context, volume['host'], filters={'id': image_volume_ids})
for image_volume in image_volumes:
# For the case where the image volume is stored in the service tenant,
# the image_owner volume metadata should also be checked.
image_owner = None
volume_metadata = image_volume.get('volume_metadata') or {}
for m in volume_metadata:
if m['key'] == 'image_owner':
image_owner = m['value']
if (image_meta['owner'] != volume['project_id'] and
image_meta['owner'] != image_owner):
LOG.info(_LI("Skipping image volume %(id)s because "
"it is not accessible by current Tenant."),
{'id': image_volume.id})
continue
LOG.info(_LI("Will clone a volume from the image volume "
"%(id)s."), {'id': image_volume.id})
break
else:
LOG.debug("No accessible image volume for image %(id)s found.",
{'id': image_meta['id']})
return None, False
try:
return self.driver.create_cloned_volume(volume, image_volume), True
except (NotImplementedError, exception.CinderException):
LOG.exception(_LE('Failed to clone image volume %(id)s.'),
{'id': image_volume['id']})
return None, False
def _create_from_image_download(self, context, volume_ref, image_location,
image_id, image_service):
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# we make said subflow/task (which is only triggered in the
# clone image 'path') resumable and revertible in the correct
# manner.
model_update = self.driver.create_volume(volume_ref)
updates = dict(model_update or dict(), status='downloading')
try:
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(updates)s"),
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
image_id, image_location, image_service)
return model_update
def _create_from_image_cache(self, context, internal_context, volume_ref,
image_id, image_meta):
"""Attempt to create the volume using the image cache.
Best case this will simply clone the existing volume in the cache.
Worst case the image is out of date and will be evicted. In that case
a clone will not be created and the image must be downloaded again.
"""
LOG.debug('Attempting to retrieve cache entry for image = '
'%(image_id)s on host %(host)s.',
{'image_id': image_id, 'host': volume_ref['host']})
try:
cache_entry = self.image_volume_cache.get_entry(internal_context,
volume_ref,
image_id,
image_meta)
if cache_entry:
LOG.debug('Creating from source image-volume %(volume_id)s',
{'volume_id': cache_entry['volume_id']})
model_update = self._create_from_source_volume(
context,
volume_ref,
cache_entry['volume_id']
)
return model_update, True
except exception.CinderException as e:
LOG.warning(_LW('Failed to create volume from image-volume cache, '
'will fall back to default behavior. Error: '
'%(exception)s'), {'exception': e})
return None, False
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
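# Overview of the fallbacks attempted below: (1) let the driver clone the
# image directly, (2) clone from a Cinder-backed Glance location when
# allowed, (3) clone from the image-volume cache, and finally (4) create a
# plain volume and download the image data into it.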
LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.",
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.
#
# First see if the driver can clone the image directly.
#
# NOTE (singn): two values need to be returned: a dict containing
# provider_location for the cloned volume, and the clone status.
model_update, cloned = self.driver.clone_image(context,
volume_ref,
image_location,
image_meta,
image_service)
# Try and clone the image if we have it set as a glance location.
if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
model_update, cloned = self._clone_image_volume(context,
volume_ref,
image_location,
image_meta)
# Try and use the image cache.
should_create_cache_entry = False
internal_context = cinder_context.get_internal_tenant_context()
if not internal_context:
LOG.warning(_LW('Unable to get Cinder internal context, will '
'not use image-volume cache.'))
if not cloned and internal_context and self.image_volume_cache:
model_update, cloned = self._create_from_image_cache(
context,
internal_context,
volume_ref,
image_id,
image_meta
)
if not cloned:
should_create_cache_entry = True
# Fall back to default behavior of creating volume,
# download the image data and copy it into the volume.
original_size = volume_ref['size']
try:
if not cloned:
with image_utils.TemporaryImages.fetch(
image_service, context, image_id) as tmp_image:
# Try to create the volume as the minimal size, then we can
# extend once the image has been downloaded.
if should_create_cache_entry:
data = image_utils.qemu_img_info(tmp_image)
virtual_size = int(
math.ceil(float(data.virtual_size) / units.Gi))
if virtual_size > volume_ref.size:
params = {'image_size': virtual_size,
'volume_size': volume_ref.size}
reason = _("Image virtual size is %(image_size)dGB"
" and doesn't fit in a volume of size"
" %(volume_size)dGB.") % params
raise exception.ImageUnacceptable(
image_id=image_id, reason=reason)
if virtual_size and virtual_size != original_size:
updates = {'size': virtual_size}
volume_ref = self.db.volume_update(
context,
volume_ref['id'],
updates
)
model_update = self._create_from_image_download(
context,
volume_ref,
image_location,
image_id,
image_service
)
if should_create_cache_entry:
# Update the newly created volume db entry before we clone it
# for the image-volume creation.
if model_update:
volume_ref = self.db.volume_update(context,
volume_ref['id'],
model_update)
self.manager._create_image_cache_volume_entry(internal_context,
volume_ref,
image_id,
image_meta)
finally:
# If we created the volume as the minimal size, extend it back to
# what was originally requested. If an exception has occurred we
# still need to put this back before letting it be raised further
# up the stack.
if volume_ref['size'] != original_size:
self.driver.extend_volume(volume_ref, original_size)
updates = {'size': original_size}
self.db.volume_update(context, volume_ref['id'], updates)
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_raw_volume(self, volume_ref, **kwargs):
return self.driver.create_volume(volume_ref)
def execute(self, context, volume_ref, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume_ref['id']
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.error(_LE("Unable to create volume. "
"Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s "
"with specification: %(volume_spec)s"),
{'volume_spec': volume_spec, 'volume_id': volume_id,
'create_type': create_type})
if create_type == 'raw':
model_update = self._create_raw_volume(volume_ref=volume_ref,
**volume_spec)
elif create_type == 'snap':
model_update = self._create_from_snapshot(context,
volume_ref=volume_ref,
**volume_spec)
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'source_replica':
model_update = self._create_from_source_replica(
context, volume_ref=volume_ref, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume_ref=volume_ref,
**volume_spec)
else:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
# Persist any model information provided on creation.
try:
if model_update:
volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update)
except exception.CinderException:
# If somehow the update failed we want to ensure that the
# failure is logged (but we do not try rescheduling since the volume
# at this point has already been created).
LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
"with creation provided model %(model)s"),
{'volume_id': volume_id, 'model': model_update})
raise
return volume_ref
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
volume_id = volume['id']
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'creating' if this fails)??
volume_ref = self.db.volume_update(context, volume_id, update)
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException:
LOG.exception(_LE("Failed updating volume %(volume_id)s with "
"%(update)s"), {'volume_id': volume_id,
'update': update})
# Even if the update fails, the volume is ready.
LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): "
"created successfully"),
{'volume_name': volume_spec['volume_name'],
'volume_id': volume_id})
def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume_id,
allow_reschedule, reschedule_context, request_spec,
filter_properties, image_volume_cache=None):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
1. Determines if rescheduling is enabled (ahead of time).
2. Inject keys & values for dependent tasks.
3. Selects one of two tasks that are activated only on *failure* (one to update
the db status & notify, or one to update the db status & notify & *reschedule*).
4. Extracts a volume specification from the provided inputs.
5. Notifies that the volume has started to be created.
6. Creates a volume from the extracted volume specification.
7. Attaches an on-success-*only* task that notifies that the volume creation
has ended and performs further database status updates.
(A minimal usage sketch of this entrypoint follows the function body.)
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'request_spec': request_spec,
'volume_id': volume_id,
}
volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False))
retry = filter_properties.get('retry', None)
# Always add OnFailureRescheduleTask; the change of the volume's status is
# handled when reverting the flow. Meanwhile, there is no need to revert the
# process of ExtractVolumeRefTask.
do_reschedule = allow_reschedule and request_spec and retry
volume_flow.add(OnFailureRescheduleTask(reschedule_context, db,
scheduler_rpcapi,
do_reschedule))
LOG.debug("Volume reschedule parameters: %(allow)s "
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(manager,
db,
driver,
image_volume_cache),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
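# Usage sketch (editor's addition, not part of the upstream module): get_flow()
# only loads the flow; the caller still has to run the returned taskflow engine
# and fetch the created volume from its storage. The arguments below stand in
# for whatever objects the volume manager already holds and are assumptions
# made for illustration only.
def _example_run_create_volume_flow(context, manager, db, driver,
                                    scheduler_rpcapi, host, volume_id,
                                    allow_reschedule, reschedule_context,
                                    request_spec, filter_properties,
                                    image_volume_cache=None):
    """Illustrative helper showing how the flow built above is driven."""
    flow_engine = get_flow(context, manager, db, driver, scheduler_rpcapi,
                           host, volume_id, allow_reschedule,
                           reschedule_context, request_spec,
                           filter_properties,
                           image_volume_cache=image_volume_cache)
    # Run the engine with task-level logging surfaced, then pull the volume
    # reference provided by CreateVolumeFromSpecTask ('volume').
    with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
        flow_engine.run()
    return flow_engine.storage.fetch('volume')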
import json, re
import numpy as np
from keras.layers import Dense, LSTM, Conv1D, Flatten, Reshape, Bidirectional, Input, MaxPooling1D, dot, add, GRU, Lambda
from keras.layers.embeddings import Embedding
from keras.models import Model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_sample_weight, compute_class_weight
from utilities import word2vecReader, evaluation
from wordsegment import load
import sys
from sklearn.model_selection import StratifiedKFold
from keras_self_attention import SeqSelfAttention
reload(sys)
sys.setdefaultencoding('utf8')
vocabSize = 10000
tweetLength = 25
yelpLength = 100
embeddingVectorLength = 200
batch_size = 100
dayMapper = {'Mon': 1, 'Tue': 2, 'Wed': 3, 'Thu': 4, 'Fri': 5, 'Sat': 6, 'Sun': 0}
POSMapper = {'N': 'N', 'O': 'N', '^': 'N', 'S': 'N', 'Z': 'N', 'L': 'N', 'M': 'N',
'V': 'V', 'A': 'A', 'R': 'R', '@': '@', '#': '#', '~': '~', 'E': 'E', ',': ',', 'U': 'U',
'!': '0', 'D': '0', 'P': '0', '&': '0', 'T': '0', 'X': '0', 'Y': '0', '$': '0', 'G': '0'}
load()
def removeLinks(input):
urls = re.findall("(?P<url>https?://[^\s]+)", input)
if len(urls) != 0:
for url in urls:
input = input.replace(url, '')
return input
def hourMapper(hour):
input = int(hour)
if 0 <= input < 6:
output = 0
elif 6 <= input < 12:
output = 1
elif 12 <= input < 18:
output = 2
else:
output = 3
return output
def overSample(inputList, ratio):
if ratio == 1.0:
return []
outputList = []
size = len(inputList)
for i in range(int(size*(ratio-1))):
outputList.append(inputList[i%size])
return outputList
def extractPOS(inputList, mode='all', breakEmoji=True):
posOutput = ''
contentOutput = ''
for item in inputList:
if breakEmoji:
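# Work on the unicode-escape form so that emoji and other non-ASCII symbols
# show up as \u.... or \U........ escape sequences, which are emitted as
# separate tokens tagged 'E' below.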
emojis1 = re.findall(r'\\u....', item[0].encode('unicode-escape'))
emojis2 = re.findall(r'\\U........', item[0].encode('unicode-escape'))
emojis = emojis1 + emojis2
if len(emojis) > 0:
for emoji in emojis:
contentOutput += emoji + ' '
posOutput += 'E' + ' '
else:
contentOutput += item[0] + ' '
if mode == 'all':
posOutput += item[1] + ' '
else:
posOutput += POSMapper[item[1]] + ' '
else:
contentOutput += item[0] + ' '
if mode == 'all':
posOutput += item[1] + ' '
else:
posOutput += POSMapper[item[1]] + ' '
if len(contentOutput.split(' ')) != len(posOutput.split(' ')):
print('error')
print(contentOutput)
return contentOutput.lower().strip().encode('utf-8'), posOutput.strip().encode('utf-8')
def loadYelpData(modelName, char, embedding):
print('Loading...')
if 'yelp' in modelName:
textLength = yelpLength
else:
textLength = tweetLength
contents_train = []
contents_val = []
labels_train = []
labels_val = []
ids_train = []
ids_val = []
trainFile = open('data/yelp/consolidateData_' + modelName + '_train.json', 'r')
valFile = open('data/yelp/consolidateData_' + modelName + '_test.json', 'r')
for line in trainFile:
data = json.loads(line.strip())
histLen = len(data['reviews'])
contents_train.append(data['reviews'][histLen-1]['text'].lower().encode('utf-8'))
labels_train.append(data['reviews'][histLen-1]['stars'])
ids_train.append(str(data['reviews'][histLen-1]['review_id']))
ids_train = np.array(ids_train)
labels_train = np.array(labels_train)
for line in valFile:
data = json.loads(line.strip())
histLen = len(data['reviews'])
contents_val.append(data['reviews'][histLen-1]['text'].lower().encode('utf-8'))
labels_val.append(data['reviews'][histLen-1]['stars'])
ids_val.append(str(data['reviews'][histLen-1]['review_id']))
ids_val = np.array(ids_val)
labels_val = np.array(labels_val)
if char:
tk = Tokenizer(num_words=vocabSize, char_level=char, filters='')
else:
tk = Tokenizer(num_words=vocabSize, char_level=char)
tk.fit_on_texts(contents_train + contents_val)
textSequences_train = tk.texts_to_sequences(contents_train)
textVector_train = sequence.pad_sequences(textSequences_train, maxlen=textLength, truncating='post', padding='post')
textSequences_val = tk.texts_to_sequences(contents_val)
textVector_val = sequence.pad_sequences(textSequences_val, maxlen=textLength, truncating='post', padding='post')
if embedding == 'glove':
embeddings_index = {}
embFile = open('../tweetEmbeddingData/glove.6B.200d.txt', 'r')
for line in embFile:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
embFile.close()
print('Found %s word vectors.' % len(embeddings_index))
word_index = tk.word_index
embMatrix = np.zeros((len(word_index) + 1, 200))
for word, i in word_index.items():
embVector = embeddings_index.get(word)
if embVector is not None:
embMatrix[i] = embVector
elif embedding == 'word2vec':
word_index = tk.word_index
w2v = word2vecReader.Word2Vec()
embModel = w2v.loadModel()
embMatrix = np.zeros((len(word_index) + 1, 400))
for word, i in word_index.items():
if word in embModel:
embMatrix[i] = embModel[word]
else:
embMatrix = None
word_index = None
return ids_train, ids_val, labels_train, labels_val, contents_train, contents_val, textVector_train, textVector_val, embMatrix, word_index
def loadData(modelName, char, embedding, dev=False):
print('Loading...')
contents_train = []
contents_val = []
labels_train = []
labels_val = []
places_train = []
places_val = []
ids_train = []
ids_val = []
trainFile = open('data/consolidateData_' + modelName + '_train.json', 'r')
if dev:
valFile = open('data/consolidateData_' + modelName + '_test.json', 'r')
else:
valFile = open('data/consolidateData_' + modelName + '_dev.json', 'r')
for line in trainFile:
data = json.loads(line.strip())
contents_train.append(data['content'].lower().encode('utf-8'))
labels_train.append(data['label'])
places_train.append(data['place'])
ids_train.append(str(data['id']))
places_train = np.array(places_train)
ids_train = np.array(ids_train)
labels_train = np.array(labels_train)
for line in valFile:
data = json.loads(line.strip())
contents_val.append(data['content'].lower().encode('utf-8'))
labels_val.append(data['label'])
places_val.append(data['place'])
ids_val.append(str(data['id']))
places_val = np.array(places_val)
ids_val = np.array(ids_val)
labels_val = np.array(labels_val)
if char:
tk = Tokenizer(num_words=vocabSize, char_level=char, filters='')
else:
tk = Tokenizer(num_words=vocabSize, char_level=char)
tk.fit_on_texts(contents_train + contents_val)
tweetSequences_train = tk.texts_to_sequences(contents_train)
tweetVector_train = sequence.pad_sequences(tweetSequences_train, maxlen=tweetLength, truncating='post', padding='post')
tweetSequences_val = tk.texts_to_sequences(contents_val)
tweetVector_val = sequence.pad_sequences(tweetSequences_val, maxlen=tweetLength, truncating='post', padding='post')
if embedding == 'glove':
embeddings_index = {}
embFile = open('../tweetEmbeddingData/glove.twitter.27B.200d.txt', 'r')
for line in embFile:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
embFile.close()
print('Found %s word vectors.' % len(embeddings_index))
word_index = tk.word_index
embMatrix = np.zeros((len(word_index) + 1, 200))
for word, i in word_index.items():
embVector = embeddings_index.get(word)
if embVector is not None:
embMatrix[i] = embVector
elif embedding == 'word2vec':
word_index = tk.word_index
w2v = word2vecReader.Word2Vec()
embModel = w2v.loadModel()
embMatrix = np.zeros((len(word_index) + 1, 400))
for word, i in word_index.items():
if word in embModel:
embMatrix[i] = embModel[word]
else:
embMatrix = None
word_index = None
return ids_train, ids_val, labels_train, labels_val, places_train, places_val, contents_train, contents_val, tweetVector_train, tweetVector_val, embMatrix, word_index
def manageData(trainDataLists, testDataLists):
print ('Manage train and test data split...')
output = {}
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
totalDataList = []
for index, trainData in enumerate(trainDataLists):
testData = testDataLists[index]
totalData = np.concatenate((trainData, testData))
totalDataList.append(totalData)
for fold, (train_index, test_index) in enumerate(skf.split(totalDataList[4], totalDataList[1])):
trainDataList = []
testDataList = []
for data in totalDataList:
trainDataList.append(data[train_index])
testDataList.append(data[test_index])
output[fold] = (trainDataList, testDataList)
return output
def processRNN(modelName, RNN='LSTM', balancedWeight='None', embedding='None', char=False, epochs=4, dev=False):
if RNN == 'LSTM':
resultName = 'result/LSTM_' + modelName + '_' + balancedWeight
elif RNN == 'GRU':
resultName = 'result/GRU_' + modelName + '_' + balancedWeight
if 'yelp' in modelName:
ids_train, ids_val, labels_train, labels_val, contents_train, contents_val, textVector_train, textVector_val, embMatrix, word_index = loadYelpData(modelName, char, embedding)
textLength = yelpLength
else:
ids_train, ids_val, labels_train, labels_val, places_train, places_val, contents_train, contents_val, textVector_train, textVector_val, embMatrix, word_index = loadData(modelName, char, embedding, dev=dev)
textLength = tweetLength
labelNum = len(np.unique(np.concatenate([labels_train, labels_val])))
encoder = LabelEncoder()
encoder.fit(np.concatenate([labels_train, labels_val]))
labels_train = encoder.transform(labels_train)
labels_val = encoder.transform(labels_val)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
#trainDataList = [ids_train, labels_train, places_train, contents_train, tweetVector_train]
#testDataList = [ids_val, labels_val, places_val, contents_val, tweetVector_val]
#expData = manageData(trainDataList, testDataList)
# training
if dev:
verbose = 2
else:
verbose = 0
print('training...')
eval = evaluation.evalMetrics(labelNum)
inputs = Input(batch_shape=(batch_size, textLength, ), name='tweet_input')
if embedding in ['word2vec', 'glove']:
embedding_tweet = Embedding(len(word_index) + 1, 200, weights=[embMatrix], trainable=True)(inputs)
else:
embedding_tweet = Embedding(vocabSize, embeddingVectorLength)(inputs)
if RNN == 'LSTM':
tweet_lstm = LSTM(200, dropout=0.2, recurrent_dropout=0.2, name='tweet_rnn')(embedding_tweet)
elif RNN == 'GRU':
tweet_lstm = GRU(300, dropout=0.2, recurrent_dropout=0.2, name='tweet_rnn')(embedding_tweet)
tweet_output = Dense(labelNum, activation='softmax', name='output')(tweet_lstm)
model = Model(inputs=inputs, outputs=tweet_output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#model.summary()
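# The Input layer above uses a fixed batch_shape, so trim both splits to an
# exact multiple of batch_size; a final partial batch would not fit the model.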
if len(labels_train) % batch_size != 0:
textVector_train = textVector_train[:-(len(textVector_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
if len(labels_val) % batch_size != 0:
textVector_val = textVector_val[:-(len(textVector_val) % batch_size)]
labels_val = labels_val[:-(len(labels_val) % batch_size)]
ids_val = ids_val[:-(len(ids_val) % batch_size)]
if 'yelp' not in modelName:
places_val = places_val[:-(len(places_val) % batch_size)]
labelVector_train = np_utils.to_categorical(labels_train)
labelVector_val = np_utils.to_categorical(labels_val)
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
trainHistory = model.fit(textVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(textVector_val, labelVector_val), sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
trainHistory = model.fit(textVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(textVector_val, labelVector_val), class_weight=classWeight, verbose=verbose)
else:
trainHistory = model.fit(textVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(textVector_val, labelVector_val), verbose=verbose)
accuracyHist = trainHistory.history['val_acc']
lossHist = trainHistory.history['val_loss']
tuneFile = open(resultName + '.tune', 'a')
for index, loss in enumerate(lossHist):
tuneFile.write(str(index) + '\t' + str(loss)+'\t'+str(accuracyHist[index])+'\n')
tuneFile.write('\n')
tuneFile.close()
scores = model.evaluate(textVector_val, labelVector_val, batch_size=batch_size, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
predictions = model.predict(textVector_val, batch_size=batch_size)
sampleFile = open(resultName + '.sample', 'a')
predLabels = []
trueLabel_val = encoder.inverse_transform(labels_val)
for index, pred in enumerate(predictions):
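        # argmax over the softmax output: pick the label with the highest
        # predicted probability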
predLabel = labelList[pred.tolist().index(max(pred))]
if 'yelp' in modelName:
sampleFile.write(ids_val[index] + '\t' + contents_val[index] + '\t' + str(trueLabel_val[index]) + '\t' + str(predLabel) + '\n')
else:
sampleFile.write(ids_val[index] + '\t' + contents_val[index] + '\t' + trueLabel_val[index] + '\t' + predLabel + '\t' + places_val[index] + '\n')
predLabels.append(predLabel)
sampleFile.close()
eval.addEval(scores[1], trueLabel_val, predLabels)
if not dev:
score, scoreSTD = eval.getScore()
precision, preSTD = eval.getPrecision()
recall, recSTD = eval.getRecall()
f1, f1STD = eval.getF1()
conMatrix = eval.getConMatrix()
resultFile = open(resultName + '.result', 'a')
confusionFile = open(resultName + '.confMatrix', 'a')
for row in conMatrix:
lineOut = ''
for line in row:
lineOut += str(line) + '\t'
confusionFile.write(lineOut.strip() + '\n')
confusionFile.write('\n')
resultFile.write(score + '\n')
resultFile.write(recall + '\n')
resultFile.write(precision + '\n')
resultFile.write(f1 + '\n\n')
confusionFile.close()
resultFile.close()
print(score)
print(recall)
print(precision)
print(f1)
def processBiRNN(modelName, RNN='LSTM', balancedWeight='None', embedding='None', char=False, epochs=4, dev=False):
if RNN == 'LSTM':
resultName = 'result/BiLSTM_' + modelName + '_' + balancedWeight
elif RNN == 'GRU':
resultName = 'result/BiGRU_' + modelName + '_' + balancedWeight
ids_train, ids_val, labels_train, labels_val, places_train, places_val, contents_train, contents_val, tweetVector_train, tweetVector_val, embMatrix, word_index = loadData(
modelName, char, embedding, dev=dev)
labelNum = len(np.unique(np.concatenate([labels_train, labels_val])))
encoder = LabelEncoder()
encoder.fit(np.concatenate([labels_train, labels_val]))
labels_train = encoder.transform(labels_train)
labels_val = encoder.transform(labels_val)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
# training
if dev:
verbose = 2
else:
verbose = 0
print('training...')
eval = evaluation.evalMetrics(labelNum)
inputs = Input(batch_shape=(batch_size, tweetLength,), name='tweet_input')
if embedding in ['word2vec', 'glove']:
embedding_tweet = Embedding(len(word_index) + 1, 200, weights=[embMatrix], trainable=True)(inputs)
else:
embedding_tweet = Embedding(vocabSize, embeddingVectorLength)(inputs)
if RNN == 'LSTM':
tweet_rnn = Bidirectional(LSTM(200, dropout=0.2, recurrent_dropout=0.2, name='tweet_rnn'))(embedding_tweet)
elif RNN == 'GRU':
tweet_rnn = Bidirectional(GRU(300, dropout=0.2, recurrent_dropout=0.2, name='tweet_rnn'))(embedding_tweet)
tweet_output = Dense(labelNum, activation='softmax', name='output')(tweet_rnn)
model = Model(inputs=inputs, outputs=tweet_output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
if len(labels_train) % batch_size != 0:
tweetVector_train = tweetVector_train[:-(len(tweetVector_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
if len(labels_val) % batch_size != 0:
tweetVector_val = tweetVector_val[:-(len(tweetVector_val) % batch_size)]
labels_val = labels_val[:-(len(labels_val) % batch_size)]
places_val = places_val[:-(len(places_val) % batch_size)]
ids_val = ids_val[:-(len(ids_val) % batch_size)]
labelVector_train = np_utils.to_categorical(labels_train)
labelVector_val = np_utils.to_categorical(labels_val)
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
        trainHistory = model.fit(tweetVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(tweetVector_val, labelVector_val), sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
trainHistory = model.fit(tweetVector_train, labelVector_train, validation_data=(tweetVector_val, labelVector_val), epochs=epochs, batch_size=batch_size, class_weight=classWeight, verbose=verbose)
else:
trainHistory = model.fit(tweetVector_train, labelVector_train, validation_data=(tweetVector_val, labelVector_val), epochs=epochs, batch_size=batch_size, verbose=verbose)
accuracyHist = trainHistory.history['val_acc']
lossHist = trainHistory.history['val_loss']
tuneFile = open(resultName + '.tune', 'a')
for index, loss in enumerate(lossHist):
tuneFile.write(str(index) + '\t' + str(loss) + '\t' + str(accuracyHist[index]) + '\n')
tuneFile.write('\n')
tuneFile.close()
scores = model.evaluate(tweetVector_val, labelVector_val, batch_size=batch_size, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
predictions = model.predict(tweetVector_val, batch_size=batch_size)
sampleFile = open(resultName + '.sample', 'a')
predLabels = []
trueLabel_val = encoder.inverse_transform(labels_val)
for index, pred in enumerate(predictions):
predLabel = labelList[pred.tolist().index(max(pred))]
sampleFile.write(ids_val[index] + '\t' + contents_val[index] + '\t' + trueLabel_val[index] + '\t' + predLabel + '\t' + places_val[index] + '\n')
predLabels.append(predLabel)
sampleFile.close()
eval.addEval(scores[1], trueLabel_val, predLabels)
if not dev:
score, scoreSTD = eval.getScore()
precision, preSTD = eval.getPrecision()
recall, recSTD = eval.getRecall()
f1, f1STD = eval.getF1()
conMatrix = eval.getConMatrix()
resultFile = open(resultName + '.result', 'a')
confusionFile = open(resultName + '.confMatrix', 'a')
for row in conMatrix:
lineOut = ''
for line in row:
lineOut += str(line) + '\t'
confusionFile.write(lineOut.strip() + '\n')
confusionFile.write('\n')
resultFile.write(score + '\t' + scoreSTD + '\n')
resultFile.write(recall + '\t' + recSTD + '\n')
resultFile.write(precision + '\t' + preSTD + '\n')
resultFile.write(f1 + '\t' + f1STD + '\n\n')
confusionFile.close()
resultFile.close()
print(score + ' ' + scoreSTD)
print(recall + ' ' + recSTD)
print(precision + ' ' + preSTD)
print(f1 + ' ' + f1STD)
def processAttRNN(modelName, RNN='LSTM', balancedWeight='None', embedding='None', char=False, epochs=4, dev=False):
if RNN == 'LSTM':
resultName = 'result/AttLSTM_' + modelName + '_' + balancedWeight
elif RNN == 'GRU':
resultName = 'result/AttGRU_' + modelName + '_' + balancedWeight
ids_train, ids_val, labels_train, labels_val, places_train, places_val, contents_train, contents_val, tweetVector_train, tweetVector_val, embMatrix, word_index = loadData(modelName, char, embedding, dev=dev)
labelNum = len(np.unique(np.concatenate([labels_train, labels_val])))
encoder = LabelEncoder()
encoder.fit(np.concatenate([labels_train, labels_val]))
labels_train = encoder.transform(labels_train)
labels_val = encoder.transform(labels_val)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
#trainDataList = [ids_train, labels_train, places_train, contents_train, tweetVector_train]
#testDataList = [ids_val, labels_val, places_val, contents_val, tweetVector_val]
#expData = manageData(trainDataList, testDataList)
# training
if dev:
verbose = 2
else:
verbose = 0
print('training...')
eval = evaluation.evalMetrics(labelNum)
inputs = Input(batch_shape=(batch_size, tweetLength, ), name='tweet_input')
if embedding in ['word2vec', 'glove']:
embedding_tweet = Embedding(len(word_index) + 1, 200, weights=[embMatrix], trainable=True)(inputs)
else:
embedding_tweet = Embedding(vocabSize, embeddingVectorLength)(inputs)
if RNN == 'LSTM':
tweet_rnn = LSTM(200, dropout=0.2, recurrent_dropout=0.2, name='tweet_rnn', return_sequences=True)(embedding_tweet)
elif RNN == 'GRU':
tweet_rnn = GRU(300, dropout=0.2, recurrent_dropout=0.2, name='tweet_rnn', return_sequences=True)(embedding_tweet)
self_attention = SeqSelfAttention(attention_activation='sigmoid', name='self_attention')(tweet_rnn)
#flatten_result = Flatten()(self_attention)
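    # SeqSelfAttention returns a full sequence; the Lambda below keeps only
    # the last timestep as a fixed-size summary vector for the classifier.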
last_timestep = Lambda(lambda x: x[:, -1, :])(self_attention)
tweet_output = Dense(labelNum, activation='softmax', name='output')(last_timestep)
model = Model(inputs=inputs, outputs=tweet_output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
if len(labels_train) % batch_size != 0:
tweetVector_train = tweetVector_train[:-(len(tweetVector_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
if len(labels_val) % batch_size != 0:
tweetVector_val = tweetVector_val[:-(len(tweetVector_val) % batch_size)]
labels_val = labels_val[:-(len(labels_val) % batch_size)]
places_val = places_val[:-(len(places_val) % batch_size)]
ids_val = ids_val[:-(len(ids_val) % batch_size)]
labelVector_train = np_utils.to_categorical(labels_train)
labelVector_val = np_utils.to_categorical(labels_val)
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
trainHistory = model.fit(tweetVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(tweetVector_val, labelVector_val), sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
trainHistory = model.fit(tweetVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(tweetVector_val, labelVector_val), class_weight=classWeight, verbose=verbose)
else:
trainHistory = model.fit(tweetVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(tweetVector_val, labelVector_val), verbose=verbose)
accuracyHist = trainHistory.history['val_acc']
lossHist = trainHistory.history['val_loss']
tuneFile = open(resultName + '.tune', 'a')
for index, loss in enumerate(lossHist):
tuneFile.write(str(index) + '\t' + str(loss)+'\t'+str(accuracyHist[index])+'\n')
tuneFile.write('\n')
tuneFile.close()
scores = model.evaluate(tweetVector_val, labelVector_val, batch_size=batch_size, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
predictions = model.predict(tweetVector_val, batch_size=batch_size)
sampleFile = open(resultName + '.sample', 'a')
predLabels = []
trueLabel_val = encoder.inverse_transform(labels_val)
for index, pred in enumerate(predictions):
predLabel = labelList[pred.tolist().index(max(pred))]
sampleFile.write(ids_val[index] + '\t' + contents_val[index] + '\t' + trueLabel_val[index] + '\t' + predLabel + '\t' + places_val[index] + '\n')
predLabels.append(predLabel)
sampleFile.close()
eval.addEval(scores[1], trueLabel_val, predLabels)
if not dev:
score, scoreSTD = eval.getScore()
precision, preSTD = eval.getPrecision()
recall, recSTD = eval.getRecall()
f1, f1STD = eval.getF1()
conMatrix = eval.getConMatrix()
resultFile = open(resultName + '.result', 'a')
confusionFile = open(resultName + '.confMatrix', 'a')
for row in conMatrix:
lineOut = ''
for line in row:
lineOut += str(line) + '\t'
confusionFile.write(lineOut.strip() + '\n')
confusionFile.write('\n')
resultFile.write(score + '\t' + scoreSTD + '\n')
resultFile.write(recall + '\t' + recSTD + '\n')
resultFile.write(precision + '\t' + preSTD + '\n')
resultFile.write(f1 + '\t' + f1STD + '\n\n')
confusionFile.close()
resultFile.close()
print(score + ' ' + scoreSTD)
print(recall + ' ' + recSTD)
print(precision + ' ' + preSTD)
print(f1 + ' ' + f1STD)
def processCNNLSTM(modelName, RNN='LSTM', balancedWeight='None', embedding='None', char=False, epochs=4, dev=False):
if RNN == 'LSTM':
resultName = 'result/CNNLSTM_' + modelName + '_' + balancedWeight
elif RNN == 'GRU':
resultName = 'result/CNNGRU_' + modelName + '_' + balancedWeight
ids_train, ids_val, labels_train, labels_val, places_train, places_val, contents_train, contents_val, tweetVector_train, tweetVector_val, embMatrix, word_index = loadData(
modelName, char, embedding, dev=dev)
labelNum = len(np.unique(np.concatenate([labels_train, labels_val])))
encoder = LabelEncoder()
encoder.fit(np.concatenate([labels_train, labels_val]))
labels_train = encoder.transform(labels_train)
labels_val = encoder.transform(labels_val)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
# training
if dev:
verbose = 2
else:
verbose = 0
print('training...')
eval = evaluation.evalMetrics(labelNum)
input = Input(batch_shape=(batch_size, tweetLength, ))
if embedding in ['word2vec', 'glove']:
embedding_tweet = Embedding(len(word_index)+1, 200, weights=[embMatrix], trainable=True)(input)
else:
embedding_tweet = Embedding(vocabSize, embeddingVectorLength)(input)
tweet_cnn = Conv1D(filters=64, kernel_size=3, padding='same', activation='relu')(embedding_tweet)
tweet_pool = MaxPooling1D(pool_size=2)(tweet_cnn)
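    # Conv1D extracts local n-gram features and MaxPooling1D(pool_size=2)
    # halves the sequence length before it is fed to the recurrent layer.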
if RNN == 'LSTM':
tweet_rnn = LSTM(200, dropout=0.2, recurrent_dropout=0.2, name='LSTM')(tweet_pool)
elif RNN == 'GRU':
tweet_rnn = GRU(300, dropout=0.2, recurrent_dropout=0.2, name='GRU')(tweet_pool)
output = Dense(labelNum, activation='softmax')(tweet_rnn)
model = Model(inputs=input, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# print(model.summary())
if len(labels_train) % batch_size != 0:
tweetVector_train = tweetVector_train[:-(len(tweetVector_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
if len(labels_val) % batch_size != 0:
tweetVector_val = tweetVector_val[:-(len(tweetVector_val) % batch_size)]
labels_val = labels_val[:-(len(labels_val) % batch_size)]
places_val = places_val[:-(len(places_val) % batch_size)]
ids_val = ids_val[:-(len(ids_val) % batch_size)]
labelVector_train = np_utils.to_categorical(labels_train)
labelVector_val = np_utils.to_categorical(labels_val)
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
        trainHistory = model.fit(tweetVector_train, labelVector_train, epochs=epochs, batch_size=batch_size, validation_data=(tweetVector_val, labelVector_val), sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
trainHistory = model.fit(tweetVector_train, labelVector_train, validation_data=(tweetVector_val, labelVector_val), epochs=epochs, batch_size=batch_size, class_weight=classWeight, verbose=verbose)
else:
trainHistory = model.fit(tweetVector_train, labelVector_train,
validation_data=(tweetVector_val, labelVector_val), epochs=epochs, batch_size=batch_size, verbose=verbose)
accuracyHist = trainHistory.history['val_acc']
lossHist = trainHistory.history['val_loss']
tuneFile = open(resultName + '.tune', 'a')
for index, loss in enumerate(lossHist):
tuneFile.write(str(index) + '\t' + str(loss) + '\t' + str(accuracyHist[index]) + '\n')
tuneFile.write('\n')
tuneFile.close()
scores = model.evaluate(tweetVector_val, labelVector_val, batch_size=batch_size, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
predictions = model.predict(tweetVector_val, batch_size=batch_size)
sampleFile = open(resultName + '.sample', 'a')
predLabels = []
trueLabel_val = encoder.inverse_transform(labels_val)
for index, pred in enumerate(predictions):
predLabel = labelList[pred.tolist().index(max(pred))]
sampleFile.write(ids_val[index] + '\t' + contents_val[index] + '\t' + trueLabel_val[index] + '\t' + predLabel + '\t' + places_val[index] + '\n')
predLabels.append(predLabel)
sampleFile.close()
eval.addEval(scores[1], trueLabel_val, predLabels)
if not dev:
score, scoreSTD = eval.getScore()
precision, preSTD = eval.getPrecision()
recall, recSTD = eval.getRecall()
f1, f1STD = eval.getF1()
conMatrix = eval.getConMatrix()
resultFile = open(resultName + '.result', 'a')
confusionFile = open(resultName + '.confMatrix', 'a')
for row in conMatrix:
lineOut = ''
for line in row:
lineOut += str(line) + '\t'
confusionFile.write(lineOut.strip() + '\n')
confusionFile.write('\n')
resultFile.write(score + '\n')
resultFile.write(recall + '\n')
resultFile.write(precision + '\n')
resultFile.write(f1 + '\n\n')
confusionFile.close()
resultFile.close()
print(score)
print(recall)
print(precision)
print(f1)
if __name__ == "__main__":
modelName = 'long1.5'
#modelName = 'yelpUserReview'
embModel = 'glove'
#processRNN(modelName, RNN='GRU', balancedWeight='class', embedding=embModel, char=False, epochs=5, dev=False)
#processBiRNN(modelName, RNN='GRU', balancedWeight='class', embedding=embModel, char=False, epochs=3, dev=False)
#processAttRNN(modelName, RNN='GRU', balancedWeight='class', embedding=embModel, char=False, epochs=4, dev=False)
processCNNLSTM(modelName, RNN='GRU', balancedWeight='class', embedding=embModel, char=False, epochs=4, dev=False)
|
|
"""Support for Rflink lights."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
Light,
)
from homeassistant.const import CONF_NAME, CONF_TYPE
import homeassistant.helpers.config_validation as cv
from . import (
CONF_ALIASES,
CONF_ALIASSES,
CONF_AUTOMATIC_ADD,
CONF_DEVICE_DEFAULTS,
CONF_DEVICES,
CONF_FIRE_EVENT,
CONF_GROUP,
CONF_GROUP_ALIASES,
CONF_GROUP_ALIASSES,
CONF_NOGROUP_ALIASES,
CONF_NOGROUP_ALIASSES,
CONF_SIGNAL_REPETITIONS,
DATA_DEVICE_REGISTER,
DEVICE_DEFAULTS_SCHEMA,
EVENT_KEY_COMMAND,
EVENT_KEY_ID,
SwitchableRflinkDevice,
remove_deprecated,
)
_LOGGER = logging.getLogger(__name__)
TYPE_DIMMABLE = "dimmable"
TYPE_SWITCHABLE = "switchable"
TYPE_HYBRID = "hybrid"
TYPE_TOGGLE = "toggle"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})
): DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_AUTOMATIC_ADD, default=True): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE): vol.Any(
TYPE_DIMMABLE, TYPE_SWITCHABLE, TYPE_HYBRID, TYPE_TOGGLE
),
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_GROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_FIRE_EVENT): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
# deprecated config options
vol.Optional(CONF_ALIASSES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_GROUP_ALIASSES): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NOGROUP_ALIASSES): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
},
extra=vol.ALLOW_EXTRA,
)
def entity_type_for_device_id(device_id):
"""Return entity class for protocol of a given device_id.
Async friendly.
"""
entity_type_mapping = {
# KlikAanKlikUit support both dimmers and on/off switches on the same
# protocol
"newkaku": TYPE_HYBRID
}
protocol = device_id.split("_")[0]
return entity_type_mapping.get(protocol, None)
def entity_class_for_type(entity_type):
"""Translate entity type to entity class.
Async friendly.
"""
entity_device_mapping = {
# sends only 'dim' commands not compatible with on/off switches
TYPE_DIMMABLE: DimmableRflinkLight,
        # sends only 'on/off' commands; not advised for dimmers and signal
        # repetition
TYPE_SWITCHABLE: RflinkLight,
# sends 'dim' and 'on' command to support both dimmers and on/off
# switches. Not compatible with signal repetition.
TYPE_HYBRID: HybridRflinkLight,
# sends only 'on' commands for switches which turn on and off
# using the same 'on' command for both.
TYPE_TOGGLE: ToggleRflinkLight,
}
return entity_device_mapping.get(entity_type, RflinkLight)
def devices_from_config(domain_config):
"""Parse configuration and add Rflink light devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
# Determine which kind of entity to create
if CONF_TYPE in config:
            # Remove type from config to not pass it as an argument to entity
# instantiation
entity_type = config.pop(CONF_TYPE)
else:
entity_type = entity_type_for_device_id(device_id)
entity_class = entity_class_for_type(entity_type)
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
remove_deprecated(device_config)
is_hybrid = entity_class is HybridRflinkLight
# Make user aware this can cause problems
repetitions_enabled = device_config[CONF_SIGNAL_REPETITIONS] != 1
if is_hybrid and repetitions_enabled:
_LOGGER.warning(
"Hybrid type for %s not compatible with signal "
"repetitions. Please set 'dimmable' or 'switchable' "
"type explicitly in configuration",
device_id,
)
device = entity_class(device_id, **device_config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink light platform."""
async_add_entities(devices_from_config(config))
async def add_new_device(event):
"""Check if device is known, otherwise add to list of known devices."""
device_id = event[EVENT_KEY_ID]
entity_type = entity_type_for_device_id(event[EVENT_KEY_ID])
entity_class = entity_class_for_type(entity_type)
device_config = config[CONF_DEVICE_DEFAULTS]
device = entity_class(device_id, initial_event=event, **device_config)
async_add_entities([device])
if config[CONF_AUTOMATIC_ADD]:
hass.data[DATA_DEVICE_REGISTER][EVENT_KEY_COMMAND] = add_new_device
# pylint: disable=too-many-ancestors
class RflinkLight(SwitchableRflinkDevice, Light):
"""Representation of a Rflink light."""
pass
# pylint: disable=too-many-ancestors
class DimmableRflinkLight(SwitchableRflinkDevice, Light):
"""Rflink light device that support dimming."""
_brightness = 255
async def async_added_to_hass(self):
"""Restore RFLink light brightness attribute."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if (
old_state is not None
and old_state.attributes.get(ATTR_BRIGHTNESS) is not None
):
            # also restore brightness for dimmable devices
self._brightness = int(old_state.attributes[ATTR_BRIGHTNESS])
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
if ATTR_BRIGHTNESS in kwargs:
            # RFLink supports only 16 brightness levels
self._brightness = int(kwargs[ATTR_BRIGHTNESS] / 17) * 17
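            # rounds down to a multiple of 17, e.g. 255 -> 255, 128 -> 119,
            # 40 -> 34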
# Turn on light at the requested dim level
await self._async_handle_command("dim", self._brightness)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attr = {}
if self._brightness is not None:
attr[ATTR_BRIGHTNESS] = self._brightness
return attr
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
# pylint: disable=too-many-ancestors
class HybridRflinkLight(SwitchableRflinkDevice, Light):
"""Rflink light device that sends out both dim and on/off commands.
Used for protocols which support lights that are not exclusively on/off
style. For example KlikAanKlikUit supports both on/off and dimmable light
switches using the same protocol. This type allows unconfigured
KlikAanKlikUit devices to support dimming without breaking support for
on/off switches.
    This type is not compatible with signal repetitions, as the 'dim' and 'on'
    commands are sent sequentially and multiple 'on' commands to a dimmable
    device can cause the dimmer to switch into a pulsating brightness mode,
    which results in a nice house disco :)
"""
_brightness = 255
async def async_added_to_hass(self):
"""Restore RFLink light brightness attribute."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if (
old_state is not None
and old_state.attributes.get(ATTR_BRIGHTNESS) is not None
):
            # also restore brightness for dimmable devices
self._brightness = int(old_state.attributes[ATTR_BRIGHTNESS])
async def async_turn_on(self, **kwargs):
"""Turn the device on and set dim level."""
if ATTR_BRIGHTNESS in kwargs:
            # RFLink supports only 16 brightness levels
self._brightness = int(kwargs[ATTR_BRIGHTNESS] / 17) * 17
# if receiver supports dimming this will turn on the light
# at the requested dim level
await self._async_handle_command("dim", self._brightness)
# if the receiving device does not support dimlevel this
# will ensure it is turned on when full brightness is set
if self._brightness == 255:
await self._async_handle_command("turn_on")
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attr = {}
if self._brightness is not None:
attr[ATTR_BRIGHTNESS] = self._brightness
return attr
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
# pylint: disable=too-many-ancestors
class ToggleRflinkLight(SwitchableRflinkDevice, Light):
"""Rflink light device which sends out only 'on' commands.
    Some switches, such as Livolo light switches, use the same 'on'
    command to switch the light on and off.
    If the light is on and 'on' gets sent, the light will turn off,
    and if the light is off and 'on' gets sent, the light will turn on.
"""
@property
def entity_id(self):
"""Return entity id."""
return "light.{}".format(self.name)
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command == "on":
# if the state is unknown or false, it gets set as true
# if the state is true, it gets set as false
self._state = self._state in [None, False]
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._async_handle_command("toggle")
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._async_handle_command("toggle")
|
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import sys
import textwrap
from collections import Counter
from glob import glob
from itertools import chain, product
from typing import Any, Dict, Iterable, List
import jsonschema
import yaml
from tabulate import tabulate
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore[no-redef]
if __name__ != "__main__":
raise Exception(
"This file is intended to be executed as an executable program. You cannot use it as a module."
)
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
PROVIDER_DATA_SCHEMA_PATH = os.path.join(ROOT_DIR, "airflow", "provider.yaml.schema.json")
CORE_INTEGRATIONS = ["SQL", "Local"]
errors = []
def _filepath_to_module(filepath: str):
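    # e.g. "<ROOT_DIR>/airflow/providers/amazon/aws/hooks/s3.py" becomes
    # "airflow.providers.amazon.aws.hooks.s3"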
filepath = os.path.relpath(os.path.abspath(filepath), ROOT_DIR)
if filepath.endswith(".py"):
filepath = filepath[: -(len(".py"))]
return filepath.replace("/", ".")
def _load_schema() -> Dict[str, Any]:
with open(PROVIDER_DATA_SCHEMA_PATH) as schema_file:
content = json.load(schema_file)
return content
def _load_package_data(package_paths: Iterable[str]):
schema = _load_schema()
result = {}
for provider_yaml_path in package_paths:
with open(provider_yaml_path) as yaml_file:
provider = yaml.load(yaml_file, SafeLoader)
rel_path = os.path.relpath(provider_yaml_path, ROOT_DIR)
try:
jsonschema.validate(provider, schema=schema)
        except jsonschema.ValidationError as ex:
            raise Exception(f"Unable to parse: {rel_path}.") from ex
result[rel_path] = provider
return result
def get_all_integration_names(yaml_files):
all_integrations = [
i['integration-name'] for f in yaml_files.values() if 'integrations' in f for i in f["integrations"]
]
all_integrations += ["SQL", "Local"]
return all_integrations
def check_integration_duplicates(yaml_files: Dict[str, Dict]):
"""Integration names must be globally unique."""
print("Checking integration duplicates")
all_integrations = get_all_integration_names(yaml_files)
duplicates = [(k, v) for (k, v) in Counter(all_integrations).items() if v > 1]
if duplicates:
print(
"Duplicate integration names found. Integration names must be globally unique. "
"Please delete duplicates."
)
print(tabulate(duplicates, headers=["Integration name", "Number of occurrences"]))
        sys.exit(1)
def assert_sets_equal(set1, set2):
try:
difference1 = set1.difference(set2)
except TypeError as e:
raise AssertionError(f'invalid type when attempting set difference: {e}')
except AttributeError as e:
raise AssertionError(f'first argument does not support set difference: {e}')
try:
difference2 = set2.difference(set1)
except TypeError as e:
raise AssertionError(f'invalid type when attempting set difference: {e}')
except AttributeError as e:
raise AssertionError(f'second argument does not support set difference: {e}')
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append(' -- Items in the left set but not the right:')
for item in sorted(difference1):
lines.append(f' {item!r}')
if difference2:
lines.append(' -- Items in the right set but not the left:')
for item in sorted(difference2):
lines.append(f' {item!r}')
standard_msg = '\n'.join(lines)
raise AssertionError(standard_msg)
def check_if_objects_belongs_to_package(
object_names: List[str], provider_package: str, yaml_file_path: str, resource_type: str
):
for object_name in object_names:
if not object_name.startswith(provider_package):
errors.append(
f"The `{object_name}` object in {resource_type} list in {yaml_file_path} does not start"
f" with the expected {provider_package}."
)
def parse_module_data(provider_data, resource_type, yaml_file_path):
package_dir = ROOT_DIR + "/" + os.path.dirname(yaml_file_path)
provider_package = os.path.dirname(yaml_file_path).replace(os.sep, ".")
py_files = chain(
glob(f"{package_dir}/**/{resource_type}/*.py"), glob(f"{package_dir}/{resource_type}/*.py")
)
expected_modules = {_filepath_to_module(f) for f in py_files if not f.endswith("/__init__.py")}
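    # resource_data holds the entries declared for this resource type in
    # provider.yaml; callers compare them against the modules found on disk
    # above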
resource_data = provider_data.get(resource_type, [])
return expected_modules, provider_package, resource_data
def check_completeness_of_list_of_hooks_sensors_operators(yaml_files: Dict[str, Dict]):
print("Checking completeness of list of {sensors, hooks, operators}")
print(" -- {sensors, hooks, operators} - Expected modules(Left): Current Modules(Right)")
for (yaml_file_path, provider_data), resource_type in product(
yaml_files.items(), ["sensors", "operators", "hooks"]
):
expected_modules, provider_package, resource_data = parse_module_data(
provider_data, resource_type, yaml_file_path
)
current_modules = {str(i) for r in resource_data for i in r.get('python-modules', [])}
check_if_objects_belongs_to_package(current_modules, provider_package, yaml_file_path, resource_type)
try:
assert_sets_equal(set(expected_modules), set(current_modules))
except AssertionError as ex:
nested_error = textwrap.indent(str(ex), ' ')
errors.append(
f"Incorrect content of key '{resource_type}/python-modules' "
f"in file: {yaml_file_path}\n{nested_error}"
)
def check_duplicates_in_integrations_names_of_hooks_sensors_operators(yaml_files: Dict[str, Dict]):
print("Checking for duplicates in list of {sensors, hooks, operators}")
for (yaml_file_path, provider_data), resource_type in product(
yaml_files.items(), ["sensors", "operators", "hooks"]
):
resource_data = provider_data.get(resource_type, [])
current_integrations = [r.get("integration-name", "") for r in resource_data]
if len(current_integrations) != len(set(current_integrations)):
for integration in current_integrations:
if current_integrations.count(integration) > 1:
errors.append(
f"Duplicated content of '{resource_type}/integration-name/{integration}' "
f"in file: {yaml_file_path}"
)
def check_completeness_of_list_of_transfers(yaml_files: Dict[str, Dict]):
print("Checking completeness of list of transfers")
resource_type = 'transfers'
print(" -- Expected transfers modules(Left): Current transfers Modules(Right)")
for yaml_file_path, provider_data in yaml_files.items():
expected_modules, provider_package, resource_data = parse_module_data(
provider_data, resource_type, yaml_file_path
)
current_modules = {r.get('python-module') for r in resource_data}
check_if_objects_belongs_to_package(current_modules, provider_package, yaml_file_path, resource_type)
try:
assert_sets_equal(set(expected_modules), set(current_modules))
except AssertionError as ex:
nested_error = textwrap.indent(str(ex), ' ')
errors.append(
f"Incorrect content of key '{resource_type}/python-module' "
f"in file: {yaml_file_path}\n{nested_error}"
)
def check_hook_classes(yaml_files: Dict[str, Dict]):
print("Checking connection classes belong to package")
resource_type = 'hook-class-names'
for yaml_file_path, provider_data in yaml_files.items():
provider_package = os.path.dirname(yaml_file_path).replace(os.sep, ".")
hook_class_names = provider_data.get(resource_type)
if hook_class_names:
check_if_objects_belongs_to_package(
hook_class_names, provider_package, yaml_file_path, resource_type
)
def check_duplicates_in_list_of_transfers(yaml_files: Dict[str, Dict]):
print("Checking for duplicates in list of transfers")
errors = []
resource_type = "transfers"
for yaml_file_path, provider_data in yaml_files.items():
resource_data = provider_data.get(resource_type, [])
source_target_integrations = [
(r.get("source-integration-name", ""), r.get("target-integration-name", ""))
for r in resource_data
]
if len(source_target_integrations) != len(set(source_target_integrations)):
for integration_couple in source_target_integrations:
if source_target_integrations.count(integration_couple) > 1:
errors.append(
f"Duplicated content of \n"
f" '{resource_type}/source-integration-name/{integration_couple[0]}' "
f" '{resource_type}/target-integration-name/{integration_couple[1]}' "
f"in file: {yaml_file_path}"
)
def check_invalid_integration(yaml_files: Dict[str, Dict]):
print("Detect unregistered integrations")
all_integration_names = set(get_all_integration_names(yaml_files))
for (yaml_file_path, provider_data), resource_type in product(
yaml_files.items(), ["sensors", "operators", "hooks"]
):
resource_data = provider_data.get(resource_type, [])
current_names = {r['integration-name'] for r in resource_data}
invalid_names = current_names - all_integration_names
if invalid_names:
errors.append(
f"Incorrect content of key '{resource_type}/integration-name' in file: {yaml_file_path}. "
f"Invalid values: {invalid_names}"
)
for (yaml_file_path, provider_data), key in product(
yaml_files.items(), ['source-integration-name', 'target-integration-name']
):
resource_data = provider_data.get('transfers', [])
current_names = {r[key] for r in resource_data}
invalid_names = current_names - all_integration_names
if invalid_names:
errors.append(
f"Incorrect content of key 'transfers/{key}' in file: {yaml_file_path}. "
f"Invalid values: {invalid_names}"
)
def check_doc_files(yaml_files: Dict[str, Dict]):
print("Checking doc files")
current_doc_urls = []
current_logo_urls = []
for provider in yaml_files.values():
if 'integrations' in provider:
current_doc_urls.extend(
guide
for guides in provider['integrations']
if 'how-to-guide' in guides
for guide in guides['how-to-guide']
)
current_logo_urls.extend(
integration['logo'] for integration in provider['integrations'] if 'logo' in integration
)
if 'transfers' in provider:
current_doc_urls.extend(
op['how-to-guide'] for op in provider['transfers'] if 'how-to-guide' in op
)
expected_doc_urls = {
"/docs/" + os.path.relpath(f, start=DOCS_DIR)
for f in glob(f"{DOCS_DIR}/apache-airflow-providers-*/operators/**/*.rst", recursive=True)
if not f.endswith("/index.rst") and '/_partials' not in f
}
expected_doc_urls |= {
"/docs/" + os.path.relpath(f, start=DOCS_DIR)
for f in glob(f"{DOCS_DIR}/apache-airflow-providers-*/operators.rst", recursive=True)
}
expected_logo_urls = {
"/" + os.path.relpath(f, start=DOCS_DIR)
for f in glob(f"{DOCS_DIR}/integration-logos/**/*", recursive=True)
if os.path.isfile(f)
}
try:
print(" -- Checking document urls: expected(left), current(right)")
assert_sets_equal(set(expected_doc_urls), set(current_doc_urls))
print(" -- Checking logo urls: expected(left), current(right)")
assert_sets_equal(set(expected_logo_urls), set(current_logo_urls))
except AssertionError as ex:
print(ex)
sys.exit(1)
def check_unique_provider_name(yaml_files: Dict[str, Dict]):
provider_names = [d['name'] for d in yaml_files.values()]
duplicates = {x for x in provider_names if provider_names.count(x) > 1}
if duplicates:
errors.append(f"Provider name must be unique. Duplicates: {duplicates}")
if __name__ == '__main__':
all_provider_files = sorted(glob(f"{ROOT_DIR}/airflow/providers/**/provider.yaml", recursive=True))
if len(sys.argv) > 1:
paths = sorted(sys.argv[1:])
else:
paths = all_provider_files
all_parsed_yaml_files: Dict[str, Dict] = _load_package_data(paths)
all_files_loaded = len(all_provider_files) == len(paths)
check_integration_duplicates(all_parsed_yaml_files)
    check_completeness_of_list_of_hooks_sensors_operators(all_parsed_yaml_files)
check_duplicates_in_integrations_names_of_hooks_sensors_operators(all_parsed_yaml_files)
check_completeness_of_list_of_transfers(all_parsed_yaml_files)
check_duplicates_in_list_of_transfers(all_parsed_yaml_files)
check_hook_classes(all_parsed_yaml_files)
check_unique_provider_name(all_parsed_yaml_files)
if all_files_loaded:
# Only check those if all provider files are loaded
check_doc_files(all_parsed_yaml_files)
check_invalid_integration(all_parsed_yaml_files)
if errors:
print(f"Found {len(errors)} errors")
for error in errors:
print(error)
print()
sys.exit(1)
|
|
"""Support to serve the Home Assistant API as WSGI application."""
from ipaddress import ip_network
import logging
import os
import ssl
from typing import Optional
from aiohttp import web
from aiohttp.web_exceptions import HTTPMovedPermanently
import voluptuous as vol
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, SERVER_PORT)
import homeassistant.helpers.config_validation as cv
import homeassistant.util as hass_util
from homeassistant.util import ssl as ssl_util
from homeassistant.util.logging import HideSensitiveDataFilter
from .auth import setup_auth
from .ban import setup_bans
from .const import ( # noqa
KEY_AUTHENTICATED,
KEY_HASS,
KEY_HASS_USER,
KEY_REAL_IP,
)
from .cors import setup_cors
from .real_ip import setup_real_ip
from .static import CACHE_HEADERS, CachingStaticResource
from .view import HomeAssistantView # noqa
DOMAIN = 'http'
CONF_API_PASSWORD = 'api_password'
CONF_SERVER_HOST = 'server_host'
CONF_SERVER_PORT = 'server_port'
CONF_BASE_URL = 'base_url'
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_PEER_CERTIFICATE = 'ssl_peer_certificate'
CONF_SSL_KEY = 'ssl_key'
CONF_CORS_ORIGINS = 'cors_allowed_origins'
CONF_USE_X_FORWARDED_FOR = 'use_x_forwarded_for'
CONF_TRUSTED_PROXIES = 'trusted_proxies'
CONF_TRUSTED_NETWORKS = 'trusted_networks'
CONF_LOGIN_ATTEMPTS_THRESHOLD = 'login_attempts_threshold'
CONF_IP_BAN_ENABLED = 'ip_ban_enabled'
CONF_SSL_PROFILE = 'ssl_profile'
SSL_MODERN = 'modern'
SSL_INTERMEDIATE = 'intermediate'
_LOGGER = logging.getLogger(__name__)
DEFAULT_SERVER_HOST = '0.0.0.0'
DEFAULT_DEVELOPMENT = '0'
NO_LOGIN_ATTEMPT_THRESHOLD = -1
def trusted_networks_deprecated(value):
"""Warn user trusted_networks config is deprecated."""
if not value:
return value
_LOGGER.warning(
"Configuring trusted_networks via the http component has been"
" deprecated. Use the trusted networks auth provider instead."
" For instructions, see https://www.home-assistant.io/docs/"
"authentication/providers/#trusted-networks")
return value
def api_password_deprecated(value):
"""Warn user api_password config is deprecated."""
if not value:
return value
_LOGGER.warning(
"Configuring api_password via the http component has been"
" deprecated. Use the legacy api password auth provider instead."
" For instructions, see https://www.home-assistant.io/docs/"
"authentication/providers/#legacy-api-password")
return value
HTTP_SCHEMA = vol.Schema({
vol.Optional(CONF_API_PASSWORD):
vol.All(cv.string, api_password_deprecated),
vol.Optional(CONF_SERVER_HOST, default=DEFAULT_SERVER_HOST): cv.string,
vol.Optional(CONF_SERVER_PORT, default=SERVER_PORT): cv.port,
vol.Optional(CONF_BASE_URL): cv.string,
vol.Optional(CONF_SSL_CERTIFICATE): cv.isfile,
vol.Optional(CONF_SSL_PEER_CERTIFICATE): cv.isfile,
vol.Optional(CONF_SSL_KEY): cv.isfile,
vol.Optional(CONF_CORS_ORIGINS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Inclusive(CONF_USE_X_FORWARDED_FOR, 'proxy'): cv.boolean,
vol.Inclusive(CONF_TRUSTED_PROXIES, 'proxy'):
vol.All(cv.ensure_list, [ip_network]),
vol.Optional(CONF_TRUSTED_NETWORKS, default=[]):
vol.All(cv.ensure_list, [ip_network], trusted_networks_deprecated),
vol.Optional(CONF_LOGIN_ATTEMPTS_THRESHOLD,
default=NO_LOGIN_ATTEMPT_THRESHOLD):
vol.Any(cv.positive_int, NO_LOGIN_ATTEMPT_THRESHOLD),
vol.Optional(CONF_IP_BAN_ENABLED, default=True): cv.boolean,
vol.Optional(CONF_SSL_PROFILE, default=SSL_MODERN):
vol.In([SSL_INTERMEDIATE, SSL_MODERN]),
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: HTTP_SCHEMA,
}, extra=vol.ALLOW_EXTRA)
class ApiConfig:
"""Configuration settings for API server."""
def __init__(self, host: str, port: Optional[int] = SERVER_PORT,
use_ssl: bool = False) -> None:
"""Initialize a new API config object."""
self.host = host
self.port = port
host = host.rstrip('/')
if host.startswith(("http://", "https://")):
self.base_url = host
elif use_ssl:
self.base_url = "https://{}".format(host)
else:
self.base_url = "http://{}".format(host)
if port is not None:
self.base_url += ':{}'.format(port)
async def async_setup(hass, config):
"""Set up the HTTP API and debug interface."""
conf = config.get(DOMAIN)
if conf is None:
conf = HTTP_SCHEMA({})
api_password = conf.get(CONF_API_PASSWORD)
server_host = conf[CONF_SERVER_HOST]
server_port = conf[CONF_SERVER_PORT]
ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
ssl_peer_certificate = conf.get(CONF_SSL_PEER_CERTIFICATE)
ssl_key = conf.get(CONF_SSL_KEY)
cors_origins = conf[CONF_CORS_ORIGINS]
use_x_forwarded_for = conf.get(CONF_USE_X_FORWARDED_FOR, False)
trusted_proxies = conf.get(CONF_TRUSTED_PROXIES, [])
is_ban_enabled = conf[CONF_IP_BAN_ENABLED]
login_threshold = conf[CONF_LOGIN_ATTEMPTS_THRESHOLD]
ssl_profile = conf[CONF_SSL_PROFILE]
if api_password is not None:
logging.getLogger('aiohttp.access').addFilter(
HideSensitiveDataFilter(api_password))
server = HomeAssistantHTTP(
hass,
server_host=server_host,
server_port=server_port,
ssl_certificate=ssl_certificate,
ssl_peer_certificate=ssl_peer_certificate,
ssl_key=ssl_key,
cors_origins=cors_origins,
use_x_forwarded_for=use_x_forwarded_for,
trusted_proxies=trusted_proxies,
login_threshold=login_threshold,
is_ban_enabled=is_ban_enabled,
ssl_profile=ssl_profile,
)
async def stop_server(event):
"""Stop the server."""
await server.stop()
async def start_server(event):
"""Start the server."""
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_server)
await server.start()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_server)
hass.http = server
host = conf.get(CONF_BASE_URL)
if host:
port = None
elif server_host != DEFAULT_SERVER_HOST:
host = server_host
port = server_port
else:
host = hass_util.get_local_ip()
port = server_port
hass.config.api = ApiConfig(host, port, ssl_certificate is not None)
return True
class HomeAssistantHTTP:
"""HTTP server for Home Assistant."""
def __init__(self, hass,
ssl_certificate, ssl_peer_certificate,
ssl_key, server_host, server_port, cors_origins,
use_x_forwarded_for, trusted_proxies,
login_threshold, is_ban_enabled, ssl_profile):
"""Initialize the HTTP Home Assistant server."""
app = self.app = web.Application(middlewares=[])
app[KEY_HASS] = hass
        # This order matters: the real-IP middleware must run first so the
        # ban and auth middlewares see the client's real IP.
setup_real_ip(app, use_x_forwarded_for, trusted_proxies)
if is_ban_enabled:
setup_bans(hass, app, login_threshold)
setup_auth(hass, app)
setup_cors(app, cors_origins)
self.hass = hass
self.ssl_certificate = ssl_certificate
self.ssl_peer_certificate = ssl_peer_certificate
self.ssl_key = ssl_key
self.server_host = server_host
self.server_port = server_port
self.is_ban_enabled = is_ban_enabled
self.ssl_profile = ssl_profile
self._handler = None
self.runner = None
self.site = None
def register_view(self, view):
"""Register a view with the WSGI server.
The view argument must be a class that inherits from HomeAssistantView.
It is optional to instantiate it before registering; this method will
handle it either way.
"""
if isinstance(view, type):
# Instantiate the view, if needed
view = view()
if not hasattr(view, 'url'):
class_name = view.__class__.__name__
raise AttributeError(
'{0} missing required attribute "url"'.format(class_name)
)
if not hasattr(view, 'name'):
class_name = view.__class__.__name__
raise AttributeError(
'{0} missing required attribute "name"'.format(class_name)
)
view.register(self.app, self.app.router)
def register_redirect(self, url, redirect_to):
"""Register a redirect with the server.
        Requests to `url` are answered with an HTTP 301 (Moved Permanently)
        response pointing at `redirect_to`, which must be a plain string URL.
"""
def redirect(request):
"""Redirect to location."""
raise HTTPMovedPermanently(redirect_to)
self.app.router.add_route('GET', url, redirect)
def register_static_path(self, url_path, path, cache_headers=True):
"""Register a folder or file to serve as a static path."""
if os.path.isdir(path):
if cache_headers:
resource = CachingStaticResource
else:
resource = web.StaticResource
self.app.router.register_resource(resource(url_path, path))
return
if cache_headers:
async def serve_file(request):
"""Serve file from disk."""
return web.FileResponse(path, headers=CACHE_HEADERS)
else:
async def serve_file(request):
"""Serve file from disk."""
return web.FileResponse(path)
self.app.router.add_route('GET', url_path, serve_file)
async def start(self):
"""Start the aiohttp server."""
if self.ssl_certificate:
try:
if self.ssl_profile == SSL_INTERMEDIATE:
context = ssl_util.server_context_intermediate()
else:
context = ssl_util.server_context_modern()
await self.hass.async_add_executor_job(
context.load_cert_chain, self.ssl_certificate,
self.ssl_key)
except OSError as error:
_LOGGER.error("Could not read SSL certificate from %s: %s",
self.ssl_certificate, error)
return
if self.ssl_peer_certificate:
context.verify_mode = ssl.CERT_REQUIRED
await self.hass.async_add_executor_job(
context.load_verify_locations,
self.ssl_peer_certificate)
else:
context = None
        # Aiohttp freezes apps after start so that no changes can be made.
        # However, in Home Assistant, components can be discovered after boot,
        # and registering their views then would raise a RuntimeError.
        # To work around this we prevent the router from being frozen.
# pylint: disable=protected-access
self.app._router.freeze = lambda: None
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.site = web.TCPSite(self.runner, self.server_host,
self.server_port, ssl_context=context)
try:
await self.site.start()
except OSError as error:
_LOGGER.error("Failed to create HTTP server at port %d: %s",
self.server_port, error)
async def stop(self):
"""Stop the aiohttp server."""
await self.site.stop()
await self.runner.cleanup()
|
|
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from ..externals.six.moves import xrange
from ..base import BaseEstimator, TransformerMixin
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.extmath import logsumexp
from ..utils.validation import check_non_negative
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
max_iters,
mean_change_tol, cal_sstats, random_state):
"""E-step: update document-topic distribution.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
doc_topic_prior : float
Prior of document topic distribution `theta`.
max_iters : int
Max number of iterations for updating document topic distribution in
the E-step.
mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
    cal_sstats : boolean
        Parameter that indicates whether to calculate sufficient statistics.
        Set `cal_sstats` to `True` when we need to run the M-step.
    random_state : RandomState instance or None
        Parameter that indicates how to initialize the document topic
        distribution. Setting `random_state` to None initializes the document
        topic distribution to a constant number.
Returns
-------
(doc_topic_distr, suff_stats) :
        `doc_topic_distr` is the unnormalized topic distribution for each
        document. In the literature, this is `gamma`. We can calculate
        `E[log(theta)]` from it.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, this will be None.
"""
is_sparse_x = sp.issparse(X)
n_samples, n_features = X.shape
n_topics = exp_topic_word_distr.shape[0]
if random_state:
doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
else:
doc_topic_distr = np.ones((n_samples, n_topics))
# In the literature, this is `exp(E[log(theta)])`
exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # Expected sufficient statistics for the M-step (only allocated when
    # `cal_sstats` is True).
suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
for idx_d in xrange(n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
doc_topic_d = doc_topic_distr[idx_d, :]
# The next one is a copy, since the inner loop overwrites it.
exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
exp_topic_word_d = exp_topic_word_distr[:, ids]
# Iterate between `doc_topic_d` and `norm_phi` until convergence
for _ in xrange(0, max_iters):
last_d = doc_topic_d
# The optimal phi_{dwk} is proportional to
# exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
doc_topic_d = (exp_doc_topic_d *
np.dot(cnts / norm_phi, exp_topic_word_d.T))
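            # Elementwise this is sum_w n_{dw} * phi_{dwk}, the data term of
            # the gamma update for document d.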
# Note: adds doc_topic_prior to doc_topic_d, in-place.
_dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
exp_doc_topic_d)
if mean_change(last_d, doc_topic_d) < mean_change_tol:
break
doc_topic_distr[idx_d, :] = doc_topic_d
# Contribution of document d to the expected sufficient
# statistics for the M step.
if cal_sstats:
norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_topics : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_topics`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_topics`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
        Method used to update ``components_``. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
        It is a parameter that controls the learning rate in the online
        learning method. The value should be set between (0.5, 1.0] to
        guarantee asymptotic convergence. When the value is 0.0 and batch_size
        is ``n_samples``, the update method is the same as batch learning.
        The weight given to each mini-batch update decays as
        ``(learning_offset + t) ** (-learning_decay)``, where ``t`` is the
        batch iteration number. In the literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
    evaluate_every : int, optional (default=-1)
        How often to evaluate perplexity. Only used in the `fit` method.
        Set it to 0 or a negative number to not evaluate perplexity during
        training at all. Evaluating perplexity can help you check convergence
        in the training process, but it will also increase total training
        time. Evaluating perplexity in every iteration might increase training
        time up to two-fold.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int or RandomState instance or None, optional (default=None)
Pseudo-random number generator seed control.
Attributes
----------
components_ : array, [n_topics, n_features]
Topic word distribution. ``components_[i, j]`` represents word j in
topic `i`. In the literature, this is called lambda.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://www.cs.princeton.edu/~mdhoffma/code/onlineldavb.tar
"""
def __init__(self, n_topics=10, doc_topic_prior=None,
topic_word_prior=None, learning_method='online',
learning_decay=.7, learning_offset=10., max_iter=10,
batch_size=128, evaluate_every=-1, total_samples=1e6,
perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
n_jobs=1, verbose=0, random_state=None):
self.n_topics = n_topics
self.doc_topic_prior = doc_topic_prior
self.topic_word_prior = topic_word_prior
self.learning_method = learning_method
self.learning_decay = learning_decay
self.learning_offset = learning_offset
self.max_iter = max_iter
self.batch_size = batch_size
self.evaluate_every = evaluate_every
self.total_samples = total_samples
self.perp_tol = perp_tol
self.mean_change_tol = mean_change_tol
self.max_doc_update_iter = max_doc_update_iter
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _check_params(self):
"""Check model parameters."""
if self.n_topics <= 0:
raise ValueError("Invalid 'n_topics' parameter: %r"
% self.n_topics)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online"):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self.n_topics
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self.n_topics
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_topics, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
def _e_step(self, X, cal_sstats, random_init, parallel=None):
"""E-step in EM update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
cal_sstats : boolean
Parameter that indicates whether to calculate sufficient statistics
or not. Set ``cal_sstats`` to True when we need to run the M-step.
random_init : boolean
Parameter that indicates whether to initialize the document topic
distribution randomly in the E-step. Set it to True in training
steps.
parallel : joblib.Parallel (optional)
Pre-initialized instance of joblib.Parallel.
Returns
-------
(doc_topic_distr, suff_stats) :
`doc_topic_distr` is the unnormalized topic distribution for each
document. In the literature, this is called `gamma`.
`suff_stats` is expected sufficient statistics for the M-step.
When `cal_sstats == False`, it will be None.
"""
# Run e-step in parallel
random_state = self.random_state_ if random_init else None
# TODO: make Parallel._effective_n_jobs public instead?
n_jobs = _get_n_jobs(self.n_jobs)
if parallel is None:
parallel = Parallel(n_jobs=n_jobs, verbose=self.verbose)
results = parallel(
delayed(_update_doc_distribution)(X[idx_slice, :],
self.exp_dirichlet_component_,
self.doc_topic_prior_,
self.max_doc_update_iter,
self.mean_change_tol, cal_sstats,
random_state)
for idx_slice in gen_even_slices(X.shape[0], n_jobs))
# merge result
doc_topics, sstats_list = zip(*results)
doc_topic_distr = np.vstack(doc_topics)
if cal_sstats:
# This step finishes computing the sufficient statistics for the
# M-step.
suff_stats = np.zeros(self.components_.shape)
for sstats in sstats_list:
suff_stats += sstats
suff_stats *= self.exp_dirichlet_component_
else:
suff_stats = None
return (doc_topic_distr, suff_stats)
def _em_step(self, X, total_samples, batch_update, parallel=None):
"""EM update for 1 iteration.
Update ``components_`` by batch VB or online VB.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
total_samples : integer
Total number of documents. It is only used when
batch_update is `False`.
batch_update : boolean
Parameter that controls updating method.
`True` for batch learning, `False` for online learning.
parallel : joblib.Parallel
Pre-initialized instance of joblib.Parallel
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Unnormalized document topic distribution.
"""
# E-step
_, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
parallel=parallel)
# M-step
if batch_update:
self.components_ = self.topic_word_prior_ + suff_stats
else:
# online update
# In the literature, the weight is `rho`
weight = np.power(self.learning_offset + self.n_batch_iter_,
-self.learning_decay)
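# Worked example (added note, not in the original source): with the defaults
# learning_offset=10.0 and learning_decay=0.7, the first mini-batch uses
# weight = (10 + 1) ** -0.7 ~= 0.187, and the weight shrinks toward 0 as
# n_batch_iter_ grows, so later batches move `components_` less.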
doc_ratio = float(total_samples) / X.shape[0]
self.components_ *= (1 - weight)
self.components_ += (weight * (self.topic_word_prior_
+ doc_ratio * suff_stats))
# update `components_` related variables
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
self.n_batch_iter_ += 1
return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features)
# change to perplexity later
last_bound = None
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=self.verbose) as parallel:
for i in xrange(max_iter):
if learning_method == 'online':
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :], total_samples=n_samples,
batch_update=False, parallel=parallel)
else:
# batch update
self._em_step(X, total_samples=n_samples,
batch_update=True, parallel=parallel)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False,
parallel=parallel)
bound = self.perplexity(X, doc_topics_distr,
sub_sampling=False)
if self.verbose:
print('iteration: %d, perplexity: %.4f'
% (i + 1, bound))
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
self.n_iter_ += 1
return self
def transform(self, X):
"""Transform data X according to the fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
# normalize doc_topic_distr
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def _approx_bound(self, X, doc_topic_distr, sub_sampling):
"""Estimate the variational bound.
Estimate the variational bound over "all documents" using only the
documents passed in as X. Since log-likelihood of each word cannot
be computed directly, we use this bound to estimate it.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
doc_topic_distr : array, shape=(n_samples, n_topics)
Document topic distribution. In the literature, this is called
gamma.
sub_sampling : boolean, optional, (default=False)
Compensate for subsampling of documents.
It is used when calculating the bound in online learning.
Returns
-------
score : float
"""
def _loglikelihood(prior, distr, dirichlet_distr, size):
# calculate log-likelihood
score = np.sum((prior - distr) * dirichlet_distr)
score += np.sum(gammaln(distr) - gammaln(prior))
score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
return score
is_sparse_x = sp.issparse(X)
n_samples, n_topics = doc_topic_distr.shape
n_features = self.components_.shape[1]
score = 0
dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
doc_topic_prior = self.doc_topic_prior_
topic_word_prior = self.topic_word_prior_
if is_sparse_x:
X_data = X.data
X_indices = X.indices
X_indptr = X.indptr
# E[log p(docs | theta, beta)]
for idx_d in xrange(0, n_samples):
if is_sparse_x:
ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
else:
ids = np.nonzero(X[idx_d, :])[0]
cnts = X[idx_d, ids]
temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
+ dirichlet_component_[:, ids])
norm_phi = logsumexp(temp, axis=0)
score += np.dot(cnts, norm_phi)
# compute E[log p(theta | alpha) - log q(theta | gamma)]
score += _loglikelihood(doc_topic_prior, doc_topic_distr,
dirichlet_doc_topic, self.n_topics)
# Compensate for the subsampling of the population of documents
if sub_sampling:
doc_ratio = float(self.total_samples) / n_samples
score *= doc_ratio
# E[log p(beta | eta) - log q (beta | lambda)]
score += _loglikelihood(topic_word_prior, self.components_,
dirichlet_component_, n_features)
return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self.transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def perplexity(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_topics)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self.transform(X)
else:
n_samples, n_topics = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_topics != self.n_topics:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
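# Hedged usage sketch (added; not part of the original module): a tiny random
# bag-of-words demo of the estimator defined above, assuming only the `np`
# (numpy) alias imported at the top of this module.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randint(5, size=(20, 30))  # 20 documents, 30-term vocabulary
    lda_demo = LatentDirichletAllocation(n_topics=5, learning_method='online',
                                         batch_size=10, random_state=0)
    doc_topics = lda_demo.fit(X_demo).transform(X_demo)
    print("doc-topic shape: %r" % (doc_topics.shape,))   # expected: (20, 5)
    print("perplexity: %.2f" % lda_demo.perplexity(X_demo))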
|
|
#!/usr/bin/env python3
# coding: utf-8
"""Test AST representation."""
import unittest
from triton import TritonContext, ARCH, AST_REPRESENTATION, VERSION
smtlifting = """(define-fun bswap8 ((value (_ BitVec 8))) (_ BitVec 8)
value
)
(define-fun bswap16 ((value (_ BitVec 16))) (_ BitVec 16)
(bvor
(bvshl
(bvand value (_ bv255 16))
(_ bv8 16)
)
(bvand (bvlshr value (_ bv8 16)) (_ bv255 16))
)
)
(define-fun bswap32 ((value (_ BitVec 32))) (_ BitVec 32)
(bvor
(bvshl
(bvor
(bvshl
(bvor
(bvshl
(bvand value (_ bv255 32))
(_ bv8 32)
)
(bvand (bvlshr value (_ bv8 32)) (_ bv255 32))
)
(_ bv8 32)
)
(bvand (bvlshr value (_ bv16 32)) (_ bv255 32))
)
(_ bv8 32)
)
(bvand (bvlshr value (_ bv24 32)) (_ bv255 32))
)
)
(define-fun bswap64 ((value (_ BitVec 64))) (_ BitVec 64)
(bvor
(bvshl
(bvor
(bvshl
(bvor
(bvshl
(bvor
(bvshl
(bvor
(bvshl
(bvor
(bvshl
(bvor
(bvshl
(bvand value (_ bv255 64))
(_ bv8 64)
)
(bvand (bvlshr value (_ bv8 64)) (_ bv255 64))
)
(_ bv8 64)
)
(bvand (bvlshr value (_ bv16 64)) (_ bv255 64))
)
(_ bv8 64)
)
(bvand (bvlshr value (_ bv24 64)) (_ bv255 64))
)
(_ bv8 64)
)
(bvand (bvlshr value (_ bv32 64)) (_ bv255 64))
)
(_ bv8 64)
)
(bvand (bvlshr value (_ bv40 64)) (_ bv255 64))
)
(_ bv8 64)
)
(bvand (bvlshr value (_ bv48 64)) (_ bv255 64))
)
(_ bv8 64)
)
(bvand (bvlshr value (_ bv56 64)) (_ bv255 64))
)
)
(declare-fun SymVar_0 () (_ BitVec 8))
(declare-fun SymVar_1 () (_ BitVec 8))
(define-fun ref!0 () (_ BitVec 8) (bvadd SymVar_0 SymVar_1)) ; ref test
"""
pythonlifting = """def select(mem, index):
return mem[index]
def store(mem, index, value):
mem[index] = value
return mem
def sx(bits, value):
sign_bit = 1 << (bits - 1)
return (value & (sign_bit - 1)) - (value & sign_bit)
def rol(value, rot, bits):
return ((value << rot) | (value >> (bits - rot))) & ((0b1 << bits) - 1)
def ror(value, rot, bits):
return ((value >> rot) | (value << (bits - rot))) & ((0b1 << bits) - 1)
def forall(variables, expr):
return True
def bswap(value, size):
v = value & 0xff
for index in range(8, size, 8):
v <<= 8
v |= (value >> index) & 0xff
return v
SymVar_0 = int(input())
SymVar_1 = int(input())
ref_0 = ((SymVar_0 + SymVar_1) & 0xff) # ref test
"""
class TestAstRepresentation(unittest.TestCase):
"""Testing the AST Representation."""
def setUp(self):
"""Define the arch."""
self.ctx = TritonContext(ARCH.X86_64)
self.ast = self.ctx.getAstContext()
self.v1 = self.ast.variable(self.ctx.newSymbolicVariable(8))
self.v2 = self.ast.variable(self.ctx.newSymbolicVariable(8))
self.ref = self.ctx.newSymbolicExpression(self.v1 + self.v2, "ref test")
# Default
self.assertEqual(self.ctx.getAstRepresentationMode(), AST_REPRESENTATION.SMT)
self.node = [
# Overloaded operators # SMT # Python
((self.v1 & self.v2), "(bvand SymVar_0 SymVar_1)", "(SymVar_0 & SymVar_1)"),
((self.v1 + self.v2), "(bvadd SymVar_0 SymVar_1)", "((SymVar_0 + SymVar_1) & 0xFF)"),
((self.v1 - self.v2), "(bvsub SymVar_0 SymVar_1)", "((SymVar_0 - SymVar_1) & 0xFF)"),
((self.v1 ^ self.v2), "(bvxor SymVar_0 SymVar_1)", "(SymVar_0 ^ SymVar_1)"),
((self.v1 | self.v2), "(bvor SymVar_0 SymVar_1)", "(SymVar_0 | SymVar_1)"),
((self.v1 * self.v2), "(bvmul SymVar_0 SymVar_1)", "((SymVar_0 * SymVar_1) & 0xFF)"),
((self.v1 / self.v2), "(bvudiv SymVar_0 SymVar_1)", "(SymVar_0 / SymVar_1)"),
((self.v1 % self.v2), "(bvurem SymVar_0 SymVar_1)", "(SymVar_0 % SymVar_1)"),
((self.v1 << self.v2), "(bvshl SymVar_0 SymVar_1)", "((SymVar_0 << SymVar_1) & 0xFF)"),
((self.v1 >> self.v2), "(bvlshr SymVar_0 SymVar_1)", "(SymVar_0 >> SymVar_1)"),
((~self.v1), "(bvnot SymVar_0)", "(~(SymVar_0) & 0xFF)"),
((-self.v1), "(bvneg SymVar_0)", "(-(SymVar_0) & 0xFF)"),
((self.v1 == self.v2), "(= SymVar_0 SymVar_1)", "(SymVar_0 == SymVar_1)"),
((self.v1 != self.v2), "(not (= SymVar_0 SymVar_1))", "not (SymVar_0 == SymVar_1)"),
((self.v1 <= self.v2), "(bvule SymVar_0 SymVar_1)", "(SymVar_0 <= SymVar_1)"),
((self.v1 >= self.v2), "(bvuge SymVar_0 SymVar_1)", "(SymVar_0 >= SymVar_1)"),
((self.v1 < self.v2), "(bvult SymVar_0 SymVar_1)", "(SymVar_0 < SymVar_1)"),
((self.v1 > self.v2), "(bvugt SymVar_0 SymVar_1)", "(SymVar_0 > SymVar_1)"),
# AST api # SMT # Python
(self.ast.assert_(self.v1 == 0), "(assert (= SymVar_0 (_ bv0 8)))", "assert((SymVar_0 == 0x0))"),
(self.ast.bswap(self.v1), "(bswap8 SymVar_0)", "bswap(SymVar_0, 8)"),
(self.ast.bv(2, 8), "(_ bv2 8)", "0x2"),
(self.ast.bvashr(self.v1, self.v2), "(bvashr SymVar_0 SymVar_1)", "(SymVar_0 >> SymVar_1)"),
(self.ast.bvfalse(), "(_ bv0 1)", "0x0"),
(self.ast.bvnand(self.v1, self.v2), "(bvnand SymVar_0 SymVar_1)", "(~(SymVar_0 & SymVar_1) & 0xFF)"),
(self.ast.bvnor(self.v1, self.v2), "(bvnor SymVar_0 SymVar_1)", "(~(SymVar_0 | SymVar_1) & 0xFF)"),
(self.ast.bvrol(self.v1, self.ast.bv(3, 8)), "((_ rotate_left 3) SymVar_0)", "rol(SymVar_0, 0x3, 8)"),
(self.ast.bvror(self.v2, self.ast.bv(2, 8)), "((_ rotate_right 2) SymVar_1)", "ror(SymVar_1, 0x2, 8)"),
(self.ast.bvsdiv(self.v1, self.v2), "(bvsdiv SymVar_0 SymVar_1)", "(SymVar_0 / SymVar_1)"),
(self.ast.bvsge(self.v1, self.v2), "(bvsge SymVar_0 SymVar_1)", "(SymVar_0 >= SymVar_1)"),
(self.ast.bvsgt(self.v1, self.v2), "(bvsgt SymVar_0 SymVar_1)", "(SymVar_0 > SymVar_1)"),
(self.ast.bvsle(self.v1, self.v2), "(bvsle SymVar_0 SymVar_1)", "(SymVar_0 <= SymVar_1)"),
(self.ast.bvslt(self.v1, self.v2), "(bvslt SymVar_0 SymVar_1)", "(SymVar_0 < SymVar_1)"),
(self.ast.bvsmod(self.v1, self.v2), "(bvsmod SymVar_0 SymVar_1)", "(SymVar_0 % SymVar_1)"),
(self.ast.bvsrem(self.v1, self.v2), "(bvsrem SymVar_0 SymVar_1)", "(SymVar_0 % SymVar_1)"),
(self.ast.bvtrue(), "(_ bv1 1)", "0x1"),
(self.ast.bvurem(self.v1, self.v2), "(bvurem SymVar_0 SymVar_1)", "(SymVar_0 % SymVar_1)"),
(self.ast.bvxnor(self.v1, self.v2), "(bvxnor SymVar_0 SymVar_1)", "(~(SymVar_0 ^ SymVar_1) & 0xFF)"),
(self.ast.compound([self.v1, self.v2]), "SymVar_0\nSymVar_1", "SymVar_0\nSymVar_1"),
(self.ast.concat([self.v1, self.v2]), "(concat SymVar_0 SymVar_1)", "((SymVar_0) << 8 | SymVar_1)"),
(self.ast.declare(self.v1), "(declare-fun SymVar_0 () (_ BitVec 8))", "SymVar_0 = int(input())"),
(self.ast.distinct(self.v1, self.v2), "(distinct SymVar_0 SymVar_1)", "(SymVar_0 != SymVar_1)"),
(self.ast.equal(self.v1, self.v2), "(= SymVar_0 SymVar_1)", "(SymVar_0 == SymVar_1)"),
(self.ast.extract(4, 2, self.v1), "((_ extract 4 2) SymVar_0)", "((SymVar_0 >> 2) & 0x7)"),
(self.ast.extract(6, 0, self.v1), "((_ extract 6 0) SymVar_0)", "(SymVar_0 & 0x7F)"),
(self.ast.extract(7, 0, self.v1), "SymVar_0", "SymVar_0"),
(self.ast.iff(self.v1 == 1, self.v2 == 2), "(iff (= SymVar_0 (_ bv1 8)) (= SymVar_1 (_ bv2 8)))", "((SymVar_0 == 0x1) and (SymVar_1 == 0x2)) or (not (SymVar_0 == 0x1) and not (SymVar_1 == 0x2))"),
(self.ast.ite(self.v1 == 1, self.v1, self.v2), "(ite (= SymVar_0 (_ bv1 8)) SymVar_0 SymVar_1)", "(SymVar_0 if (SymVar_0 == 0x1) else SymVar_1)"),
(self.ast.land([self.v1 == 1, self.v2 == 2]), "(and (= SymVar_0 (_ bv1 8)) (= SymVar_1 (_ bv2 8)))", "((SymVar_0 == 0x1) and (SymVar_1 == 0x2))"),
(self.ast.let("alias", self.v1, self.v2), "(let ((alias SymVar_0)) SymVar_1)", "SymVar_1"),
(self.ast.lnot(self.v1 == 0), "(not (= SymVar_0 (_ bv0 8)))", "not (SymVar_0 == 0x0)"),
(self.ast.lor([self.v1 >= 0, self.v2 <= 10]), "(or (bvuge SymVar_0 (_ bv0 8)) (bvule SymVar_1 (_ bv10 8)))", "((SymVar_0 >= 0x0) or (SymVar_1 <= 0xA))"),
(self.ast.lxor([self.v1 >= 0, self.v2 <= 10]), "(xor (bvuge SymVar_0 (_ bv0 8)) (bvule SymVar_1 (_ bv10 8)))", "(bool((SymVar_0 >= 0x0)) != bool((SymVar_1 <= 0xA)))"),
(self.ast.reference(self.ref), "ref!0", "ref_0"),
(self.ast.string("test"), "test", "test"),
(self.ast.sx(8, self.v1), "((_ sign_extend 8) SymVar_0)", "sx(0x8, SymVar_0)"),
(self.ast.zx(8, self.v1), "((_ zero_extend 8) SymVar_0)", "SymVar_0"),
(self.ast.forall([self.v1], 1 == self.v1), "(forall ((SymVar_0 (_ BitVec 8))) (= SymVar_0 (_ bv1 8)))", "forall([SymVar_0], (SymVar_0 == 0x1))"),
]
def test_smt_representation(self):
self.ctx.setAstRepresentationMode(AST_REPRESENTATION.SMT)
self.assertEqual(self.ctx.getAstRepresentationMode(), AST_REPRESENTATION.SMT)
for n in self.node:
self.assertEqual(str(n[0]), n[1])
def test_python_representation(self):
self.ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)
self.assertEqual(self.ctx.getAstRepresentationMode(), AST_REPRESENTATION.PYTHON)
for n in self.node:
# Note: lower() in order to handle boost-1.55 (from travis) and boost-1.71 (from an up-to-date machine)
self.assertEqual(str(n[0]).lower(), n[2].lower())
def test_lifting(self):
self.assertEqual(self.ctx.liftToSMT(self.ref), smtlifting)
self.assertEqual(self.ctx.liftToPython(self.ref), pythonlifting)
nodes = [
(self.v1 & self.v2),
(self.v1 + self.v2),
(self.v1 - self.v2),
(self.v1 ^ self.v2),
(self.v1 | self.v2),
(self.v1 * self.v2),
(self.v1 / self.v2),
(self.v1 % self.v2),
(self.v1 << self.v2),
(self.v1 >> self.v2),
(~self.v1),
(-self.v1),
(self.v1 == self.v2),
(self.v1 != self.v2),
(self.v1 <= self.v2),
(self.v1 >= self.v2),
(self.v1 < self.v2),
(self.v1 > self.v2),
self.ast.bswap(self.v1),
self.ast.bv(2, 8),
self.ast.bvashr(self.v1, self.v2),
self.ast.bvnand(self.v1, self.v2),
self.ast.bvnor(self.v1, self.v2),
self.ast.bvrol(self.v1, self.ast.bv(3, 8)),
self.ast.bvror(self.v2, self.ast.bv(2, 8)),
self.ast.bvsdiv(self.v1, self.v2),
self.ast.bvsge(self.v1, self.v2),
self.ast.bvsgt(self.v1, self.v2),
self.ast.bvsle(self.v1, self.v2),
self.ast.bvslt(self.v1, self.v2),
self.ast.bvsmod(self.v1, self.v2),
self.ast.bvsrem(self.v1, self.v2),
self.ast.bvurem(self.v1, self.v2),
self.ast.bvxnor(self.v1, self.v2),
self.ast.concat([self.v1, self.v2]),
self.ast.distinct(self.v1, self.v2),
self.ast.equal(self.v1, self.v2),
self.ast.extract(4, 2, self.v1),
self.ast.extract(6, 0, self.v1),
self.ast.extract(7, 0, self.v1),
self.ast.ite(self.v1 == 1, self.v1, self.v2),
self.ast.land([self.v1 == 1, self.v2 == 2]),
self.ast.lnot(self.v1 == 0),
self.ast.lor([self.v1 >= 0, self.v2 <= 10]),
self.ast.lxor([self.v1 >= 0, self.v2 <= 10]),
self.ast.reference(self.ref),
self.ast.sx(8, self.v1),
self.ast.zx(8, self.v1),
]
for n in nodes:
# LLVM
if VERSION.LLVM_INTERFACE is True:
self.assertNotEqual(len(self.ctx.liftToLLVM(n, fname="test", optimize=True)), 0)
# Dot
self.assertNotEqual(len(self.ctx.liftToDot(n)), 0)
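# Hedged sketch (added; not part of the original test file): the minimal
# representation-mode switch that the tests above exercise, shown standalone.
# It assumes the `triton` bindings imported at the top of this file are
# available at runtime.
if __name__ == '__main__':
    ctx = TritonContext(ARCH.X86_64)
    ast = ctx.getAstContext()
    a = ast.variable(ctx.newSymbolicVariable(8))
    b = ast.variable(ctx.newSymbolicVariable(8))
    node = a + b
    ctx.setAstRepresentationMode(AST_REPRESENTATION.SMT)
    print(str(node))    # -> (bvadd SymVar_0 SymVar_1)
    ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)
    print(str(node))    # -> ((SymVar_0 + SymVar_1) & 0xFF)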
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
import inspect
import os
import mox
from neutron.agent.linux import iptables_manager
from neutron.tests import base
IPTABLES_ARG = {'bn': iptables_manager.binary_name}
NAT_DUMP = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % IPTABLES_ARG)
FILTER_DUMP = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % IPTABLES_ARG)
class IptablesManagerStateFulTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesManagerStateFulTestCase, self).setUp()
self.mox = mox.Mox()
self.root_helper = 'sudo'
self.iptables = (iptables_manager.
IptablesManager(root_helper=self.root_helper))
self.mox.StubOutWithMock(self.iptables, "execute")
self.addCleanup(self.mox.UnsetStubs)
def test_binary_name(self):
self.assertEqual(iptables_manager.binary_name,
os.path.basename(inspect.stack()[-1][1])[:16])
def test_get_chain_name(self):
name = '0123456789' * 5
# 28 chars is the maximum length of iptables chain name.
self.assertEqual(iptables_manager.get_chain_name(name, wrap=False),
name[:28])
# 11 chars is the maximum length of chain name of iptables_manager
# if binary_name is prepended.
self.assertEqual(iptables_manager.get_chain_name(name, wrap=True),
name[:11])
def test_add_and_remove_chain_custom_binary_name(self):
bn = ("abcdef" * 5)
self.iptables = (iptables_manager.
IptablesManager(root_helper=self.root_helper,
binary_name=bn))
self.mox.StubOutWithMock(self.iptables, "execute")
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
iptables_args = {'bn': bn[:16]}
filter_dump = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
filter_dump_mod = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% iptables_args)
nat_dump = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
self.iptables.execute(['iptables-restore', '-c'],
process_input=nat_dump + filter_dump_mod,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore', '-c'],
process_input=nat_dump + filter_dump,
root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].empty_chain('filter')
self.iptables.apply()
self.mox.VerifyAll()
def test_empty_chain_custom_binary_name(self):
bn = ("abcdef" * 5)[:16]
self.iptables = (iptables_manager.
IptablesManager(root_helper=self.root_helper,
binary_name=bn))
self.mox.StubOutWithMock(self.iptables, "execute")
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
iptables_args = {'bn': bn}
filter_dump = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
filter_dump_mod = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'[0:0] -A %(bn)s-filter -s 0/0 -d 192.168.0.2\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% iptables_args)
nat_dump = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
self.iptables.execute(['iptables-restore', '-c'],
process_input=nat_dump + filter_dump_mod,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore', '-c'],
process_input=nat_dump + filter_dump,
root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter',
'-s 0/0 -d 192.168.0.2')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_and_remove_chain(self):
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
filter_dump_mod = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
self.iptables.execute(['iptables-restore', '-c'],
process_input=NAT_DUMP + filter_dump_mod,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore', '-c'],
process_input=NAT_DUMP + FILTER_DUMP,
root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_filter_rule(self):
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
filter_dump_mod = ('# Generated by iptables_manager\n'
'*filter\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
'[0:0] -A FORWARD -j neutron-filter-top\n'
'[0:0] -A OUTPUT -j neutron-filter-top\n'
'[0:0] -A neutron-filter-top -j %(bn)s-local\n'
'[0:0] -A INPUT -j %(bn)s-INPUT\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A FORWARD -j %(bn)s-FORWARD\n'
'[0:0] -A %(bn)s-filter -j DROP\n'
'[0:0] -A %(bn)s-INPUT -s 0/0 -d 192.168.0.2 -j '
'%(bn)s-filter\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
self.iptables.execute(['iptables-restore', '-c'],
process_input=NAT_DUMP + filter_dump_mod,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore', '-c'],
process_input=NAT_DUMP + FILTER_DUMP,
root_helper=self.root_helper
).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter' % IPTABLES_ARG)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter'
% IPTABLES_ARG)
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_nat_rule(self):
nat_dump = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
nat_dump_mod = ('# Generated by iptables_manager\n'
'*nat\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-nat - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'[0:0] -A PREROUTING -j %(bn)s-PREROUTING\n'
'[0:0] -A OUTPUT -j %(bn)s-OUTPUT\n'
'[0:0] -A POSTROUTING -j %(bn)s-POSTROUTING\n'
'[0:0] -A POSTROUTING -j neutron-postrouting-bottom\n'
'[0:0] -A neutron-postrouting-bottom -j %(bn)s-snat\n'
'[0:0] -A %(bn)s-snat -j %(bn)s-float-snat\n'
'[0:0] -A %(bn)s-PREROUTING -d 192.168.0.3 -j '
'%(bn)s-nat\n'
'[0:0] -A %(bn)s-nat -p tcp --dport 8080 -j '
'REDIRECT --to-port 80\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore', '-c'],
process_input=nat_dump_mod + FILTER_DUMP,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-c'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore', '-c'],
process_input=nat_dump + FILTER_DUMP,
root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['nat'].add_chain('nat')
self.iptables.ipv4['nat'].add_rule('PREROUTING',
'-d 192.168.0.3 -j '
'%(bn)s-nat' % IPTABLES_ARG)
self.iptables.ipv4['nat'].add_rule('nat',
'-p tcp --dport 8080' +
' -j REDIRECT --to-port 80')
self.iptables.apply()
self.iptables.ipv4['nat'].remove_rule('nat',
'-p tcp --dport 8080 -j'
' REDIRECT --to-port 80')
self.iptables.ipv4['nat'].remove_rule('PREROUTING',
'-d 192.168.0.3 -j '
'%(bn)s-nat' % IPTABLES_ARG)
self.iptables.ipv4['nat'].remove_chain('nat')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_rule_to_a_nonexistent_chain(self):
self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule,
'nonexistent', '-j DROP')
def test_remove_nonexistent_chain(self):
self.mox.StubOutWithMock(iptables_manager, "LOG")
iptables_manager.LOG.warn(('Attempted to remove chain %s which does '
'not exist'), 'nonexistent')
self.mox.ReplayAll()
self.iptables.ipv4['filter'].remove_chain('nonexistent')
self.mox.VerifyAll()
def test_remove_nonexistent_rule(self):
self.mox.StubOutWithMock(iptables_manager, "LOG")
iptables_manager.LOG.warn('Tried to remove rule that was not there: '
'%(chain)r %(rule)r %(wrap)r %(top)r',
{'wrap': True, 'top': False,
'rule': '-j DROP',
'chain': 'nonexistent'})
self.mox.ReplayAll()
self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
self.mox.VerifyAll()
def test_get_traffic_counters_chain_notexists(self):
iptables_dump = (
'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
' pkts bytes target prot opt in out source'
' destination \n'
' 400 65901 chain1 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n'
' 400 65901 chain2 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n')
self.iptables.execute(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
root_helper=self.root_helper
).AndReturn(iptables_dump)
self.iptables.execute(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x'],
root_helper=self.root_helper
).AndReturn('')
self.iptables.execute(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
root_helper=self.root_helper
).AndReturn(iptables_dump)
self.mox.ReplayAll()
acc = self.iptables.get_traffic_counters('chain1')
self.assertIsNone(acc)
def test_get_traffic_counters(self):
iptables_dump = (
'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
' pkts bytes target prot opt in out source'
' destination \n'
' 400 65901 chain1 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n'
' 400 65901 chain2 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n')
self.iptables.execute(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
root_helper=self.root_helper
).AndReturn(iptables_dump)
self.iptables.execute(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x'],
root_helper=self.root_helper
).AndReturn('')
self.iptables.execute(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
root_helper=self.root_helper
).AndReturn(iptables_dump)
self.mox.ReplayAll()
acc = self.iptables.get_traffic_counters('OUTPUT')
self.assertEqual(acc['pkts'], 1600)
self.assertEqual(acc['bytes'], 263604)
self.mox.VerifyAll()
def test_get_traffic_counters_with_zero(self):
iptables_dump = (
'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
' pkts bytes target prot opt in out source'
' destination \n'
' 400 65901 chain1 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n'
' 400 65901 chain2 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n')
self.iptables.execute(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'],
root_helper=self.root_helper
).AndReturn(iptables_dump)
self.iptables.execute(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
root_helper=self.root_helper
).AndReturn('')
self.iptables.execute(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'],
root_helper=self.root_helper
).AndReturn(iptables_dump)
self.mox.ReplayAll()
acc = self.iptables.get_traffic_counters('OUTPUT', zero=True)
self.assertEqual(acc['pkts'], 1600)
self.assertEqual(acc['bytes'], 263604)
self.mox.VerifyAll()
class IptablesManagerStateLessTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesManagerStateLessTestCase, self).setUp()
self.iptables = (iptables_manager.IptablesManager(state_less=True))
def test_nat_not_found(self):
self.assertFalse('nat' in self.iptables.ipv4)
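# Hedged usage sketch (added; not part of the original tests): the chain/rule
# call pattern that the stateful tests above drive through mox, written out
# plainly. Actually running it needs a Neutron environment and root
# privileges, so treat it as illustrative only.
def _example_add_drop_rule(root_helper='sudo'):
    manager = iptables_manager.IptablesManager(root_helper=root_helper)
    manager.ipv4['filter'].add_chain('example')
    manager.ipv4['filter'].add_rule('example', '-j DROP')
    manager.apply()    # pushes both tables via iptables-restore
    manager.ipv4['filter'].remove_rule('example', '-j DROP')
    manager.ipv4['filter'].remove_chain('example')
    manager.apply()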
|
|
#!/usr/bin/env python2
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import glob
import re
import urllib
import socket
from contextlib import closing
from collections import defaultdict
from hibench_prop_env_mapping import HiBenchEnvPropMappingMandatory, HiBenchEnvPropMapping
HibenchConf = {}
HibenchConfRef = {}
# FIXME: use log helper later
def log(*s):
if len(s) == 1:
s = s[0]
else:
s = " ".join([str(x) for x in s])
sys.stderr.write(str(s) + '\n')
def log_debug(*s):
# log(*s)
pass
# copied from http://stackoverflow.com/questions/3575554/python-subprocess-with-timeout-and-large-output-64k
# Comment: I have a better solution, but I'm too lazy to write.
import fcntl
import os
import subprocess
import time
def nonBlockRead(output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except:
return ''
def execute_cmd(cmdline, timeout):
"""
Execute cmdline, limit execution time to 'timeout' seconds.
Uses the subprocess module and subprocess.PIPE.
Returns ('Timeout', stdout, stderr) if the time limit is exceeded
(no exception is raised).
"""
p = subprocess.Popen(
cmdline,
bufsize=0, # default value of 0 (unbuffered) is best
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
t_begin = time.time() # Monitor execution time
seconds_passed = 0
stdout = ''
stderr = ''
while p.poll() is None and (
seconds_passed < timeout or timeout == 0): # Monitor process
time.sleep(0.1) # Wait a little
seconds_passed = time.time() - t_begin
stdout += nonBlockRead(p.stdout)
stderr += nonBlockRead(p.stderr)
if seconds_passed >= timeout and timeout > 0:
try:
p.stdout.close() # If they are not closed the fds will hang around until
p.stderr.close() # os.fdlimit is exceeded and cause a nasty exception
p.terminate() # Important to close the fds prior to terminating the process!
# NOTE: Are there any other "non-freed" resources?
except:
pass
return ('Timeout', stdout, stderr)
return (p.returncode, stdout, stderr)
def shell(cmd, timeout=5):
assert not "${" in cmd, "Error, missing configurations: %s" % ", ".join(
re.findall("\$\{(.*)\}", cmd))
retcode, stdout, stderr = execute_cmd(cmd, timeout)
if retcode == 'Timeout':
log("ERROR, execute cmd: '%s' timed out." % cmd)
log(" STDOUT:\n" + stdout)
log(" STDERR:\n" + stderr)
log(" Please check!")
assert 0, cmd + " timed out after %d seconds" % timeout
return stdout
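# Example (added note): shell("echo HiBench") returns "HiBench\n"; a command
# still running after `timeout` seconds hits the assertion above instead of
# returning.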
def exactly_one_file(filename_candidate_list, config_name):
for filename_pattern in filename_candidate_list:
result = exactly_one_file_one_candidate(filename_pattern)
if result != "":
return result
assert 0, "No files found under the candidate path(s), please set `" + config_name + "` manually"
def exactly_one_file_one_candidate(filename_pattern):
files = glob.glob(filename_pattern)
if len(files) == 0:
return ""
elif len(files) == 1:
return files[0]
else:
assert 0, "The pattern " + filename_pattern + \
" matches more than one file, please remove the redundant files"
def read_file_content(filepath):
file_content = []
if(len(glob.glob(filepath)) == 1):
with open(filepath) as f:
file_content = f.readlines()
return file_content
def parse_conf(conf_root, workload_config_file):
conf_files = sorted(glob.glob(conf_root + "/*.conf")) + sorted(glob.glob(workload_config_file))
# load values from conf files
for filename in conf_files:
log("Parsing conf: %s" % filename)
with open(filename) as f:
for line in f.readlines():
line = line.strip()
if not line:
continue # skip empty lines
if line[0] == '#':
continue # skip comments
try:
key, value = re.split("\s", line, 1)
except ValueError:
key = line.strip()
value = ""
HibenchConf[key] = value.strip()
HibenchConfRef[key] = filename
def override_conf_from_environment():
# override values from os environment variable settings
for env_name, prop_name in HiBenchEnvPropMappingMandatory.items() + HiBenchEnvPropMapping.items():
# An environment override applies only when the env var is set AND either
# the property was not already set by the config files or the config line
# itself just points the property at that same env variable
if env_name in os.environ and (not HibenchConf.get(
prop_name) or HibenchConf.get(prop_name) == "$" + env_name):
env_value = os.getenv(env_name)
HibenchConf[prop_name] = env_value
HibenchConfRef[prop_name] = "OS environment variable:%s" % env_name
def override_conf_by_patching_conf(patching_config=""):
# override values from the patching config string handed over by the
# parent script (a comma-separated list of key=value items)
for item in [x for x in patching_config.split(',') if x]:
key, value = re.split('=', item, 1)
HibenchConf[key] = value.strip()
HibenchConfRef[key] = "Overridden by parent script during calling: " + item
def load_config(conf_root, workload_config_file, workload_folder, patching_config=""):
abspath = os.path.abspath
conf_root = abspath(conf_root)
workload_config_file = abspath(workload_config_file)
# get current workload's framework name and store it in framework_name
(dir, framework_name) = os.path.split(workload_folder)
# get workload name
workload_name = os.path.basename(dir)
parse_conf(conf_root, workload_config_file)
override_conf_from_environment()
override_conf_by_patching_conf(patching_config)
# generate ref values, replace "${xxx}" with its value
waterfall_config()
# generate auto probe values
generate_optional_value()
# generate ref values again to ensure all values can be found
waterfall_config(force=True)
# check
check_config()
#import pdb;pdb.set_trace()
# Export config to file, let bash script to import as local variables.
print export_config(workload_name, framework_name)
def check_config(): # check configuration
# Ensure mandatory configuration values are available
for _, prop_name in HiBenchEnvPropMappingMandatory.items():
assert HibenchConf.get(
prop_name, None) is not None, "Mandatory configuration missing: %s" % prop_name
# Ensure all ref values in the configuration have been expanded
for _, prop_name in HiBenchEnvPropMappingMandatory.items() + HiBenchEnvPropMapping.items():
assert "${" not in HibenchConf.get(prop_name, ""), "Unresolved ref key: %s. \n Defined at %s:\n Unresolved value:%s\n" % (
prop_name, HibenchConfRef.get(prop_name, "unknown"), HibenchConf.get(prop_name, "unknown"))
def waterfall_config(force=False): # replace "${xxx}" with its value
no_value_sign = "___###NO_VALUE_SIGN###___"
def process_replace(m):
raw_key = m.groups()[0]
# key, default_value = (raw_key[2:-1].strip().split(":-") + [None])[:2]
key, spliter, default_value = (
re.split("(:-|:_)", raw_key[2:-1].strip()) + [None, None])[:3]
log_debug(
"key:",
key,
" value:",
HibenchConf.get(
key,
"RAWKEY:" +
raw_key),
"default value:" +
repr(default_value))
if force:
if default_value is None:
return HibenchConf.get(key)
else:
if spliter == ':_' and not default_value: # no return
return no_value_sign
return HibenchConf.get(key, default_value)
else:
return HibenchConf.get(key, "") or raw_key
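# Worked example (added note): with force=True a reference such as
# "${hibench.spark.home:-/opt/spark}" resolves to HibenchConf["hibench.spark.home"]
# when that key exists and to "/opt/spark" otherwise, while the ":_" form with
# an empty default resolves to no_value_sign so the whole entry is dropped at
# the end of waterfall_config().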
# We assume the user wants wildcard replacement when the number of stars in the key
# equals that in the value; otherwise the line is treated as a normal configuration entry and passed to the framework unchanged
def wildcard_replacement(key, value):
if "*" in key:
# we meet a wildcard replacement situation
if len(key.split("*")) == len(value.split("*")):
key_searcher = re.compile("^" + "(.*)".join(key.split("*")) + "$")
matched_keys_to_remove = []
for k in HibenchConf.keys():
matched_keys = key_searcher.match(k)
if matched_keys:
matched_keys_to_remove.append(k)
if not "*" in k:
splited_value = value.split("*")
new_key = splited_value[
0] + "".join([matched_keys.groups()[idx] + x for idx, x in enumerate(splited_value[1:])])
HibenchConf[new_key] = HibenchConf[k]
HibenchConfRef[
new_key] = "Generated by wildcard rule: %s -> %s" % (key, value)
for key in matched_keys_to_remove:
del HibenchConf[key]
return True
else:
del HibenchConf[key]
return True
return False
p = re.compile("(\$\{\s*[^\s^\$^\}]+\s*\})")
wildcard_rules = []
finish = False
while True:
while not finish:
finish = True
for key, value in HibenchConf.items():
old_value = value
old_key = key
key = p.sub(process_replace, key)
value = p.sub(process_replace, value)
if key != old_key:
#log_debug("update key:", key, old_key)
HibenchConf[key] = HibenchConf[old_key]
del HibenchConf[old_key]
finish = False
elif value != old_value: # we have updated value, try again
# log_debug("Waterfall conf: %s: %s -> %s" % (key, old_value, value))
HibenchConf[key] = value
finish = False
wildcard_rules = [(key, HibenchConf[key]) for key in HibenchConf if "*" in key]
# now, let's check wildcard replacement rules
for key, value in wildcard_rules:
# check if we found a rule like: aaa.*.ccc.*.ddd -> bbb.*.*
# Wildcard replacement is useful for samza conf, which seems to
# accept anything under its conf namespaces. The first wildcard
# in the key matches the first wildcard in the value, and so on;
# the number of wildcards in the key and the value must be
# identical. Swapping the order (e.g. matching the first wildcard
# in the key to the second wildcard in the value) is not
# supported; it just doesn't seem to be needed.
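# Worked example (added note): given the rule key "aaa.*.ccc" with value
# "bbb.*" and an existing entry HibenchConf["aaa.xyz.ccc"] = "1",
# wildcard_replacement() creates HibenchConf["bbb.xyz"] = "1" and removes
# both "aaa.xyz.ccc" and the "aaa.*.ccc" rule itself.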
if not wildcard_replacement(key, value): # not wildcard rules? re-add
HibenchConf[key] = value
if wildcard_rules: # need try again
wildcard_rules = []
else:
break
# all finished, remove values containing no_value_sign
for key in [x for x in HibenchConf if no_value_sign in HibenchConf[x]]:
del HibenchConf[key]
del HibenchConfRef[key]
def probe_java_bin():
# probe JAVA_HOME
if not HibenchConf.get("java.bin", ""):
# probe java bin
if os.environ.get('JAVA_HOME', ''):
# lookup in os environment
HibenchConf['java.bin'] = os.path.join(os.environ.get('JAVA_HOME'), "bin", "java")
HibenchConfRef['java.bin'] = "probed from os environment of JAVA_HOME"
else:
# lookup in path
path_dirs = os.environ.get('PATH', '').split(':')
for path in path_dirs:
if os.path.isfile(os.path.join(path, "java")):
HibenchConf['java.bin'] = os.path.join(path, "java")
HibenchConfRef['java.bin'] = "probed by lookup in $PATH: " + path
break
else:
# still not found?
assert 0, "JAVA_HOME is unset and no java executable was found in $PATH"
def probe_hadoop_release():
# probe hadoop release: CDH (only CDH 5 is supported in HiBench 6.0), HDP, or
# Apache
if not HibenchConf.get("hibench.hadoop.release", ""):
cmd_release_and_version = HibenchConf['hibench.hadoop.executable'] + ' version | head -1'
# version here means, for example apache hadoop {2.7.3}
hadoop_release_and_version = shell(cmd_release_and_version).strip()
HibenchConf["hibench.hadoop.release"] = \
"cdh4" if "cdh4" in hadoop_release_and_version else \
"cdh5" if "cdh5" in hadoop_release_and_version else \
"apache" if "Hadoop" in hadoop_release_and_version else \
"UNKNOWN"
HibenchConfRef["hibench.hadoop.release"] = "Inferred by: hadoop executable, the path is:\"%s\"" % HibenchConf[
'hibench.hadoop.executable']
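# Example (added note): a first line of `hadoop version` such as
# "Hadoop 2.7.3" maps to release "apache", while one containing "cdh5"
# (e.g. "Hadoop 2.6.0-cdh5.12.0") maps to "cdh5".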
assert HibenchConf["hibench.hadoop.release"] in ["cdh4", "cdh5", "apache",
"hdp"], "Unknown hadoop release. Auto probe failed, please override `hibench.hadoop.release` to explicitly define this property"
assert HibenchConf[
"hibench.hadoop.release"] != "cdh4", "Hadoop release CDH4 is not supported in HiBench6.0, please upgrade to CDH5 or use Apache Hadoop/HDP"
def probe_hadoop_examples_jars():
# probe hadoop example jars
if not HibenchConf.get("hibench.hadoop.examples.jar", ""):
examples_jars_candidate_apache0 = HibenchConf[
'hibench.hadoop.home'] + "/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar"
examples_jars_candidate_cdh0 = HibenchConf[
'hibench.hadoop.home'] + "/share/hadoop/mapreduce2/hadoop-mapreduce-examples-*.jar"
examples_jars_candidate_cdh1 = HibenchConf[
'hibench.hadoop.home'] + "/../../jars/hadoop-mapreduce-examples-*.jar"
examples_jars_candidate_hdp0 = HibenchConf[
'hibench.hadoop.home'] + "/../hadoop-mapreduce-client/hadoop-mapreduce-examples.jar"
examples_jars_candidate_hdp1 = HibenchConf[
'hibench.hadoop.home'] + "/../hadoop-mapreduce/hadoop-mapreduce-examples.jar"
examples_jars_candidate_list = [
examples_jars_candidate_apache0,
examples_jars_candidate_cdh0,
examples_jars_candidate_cdh1,
examples_jars_candidate_hdp0,
examples_jars_candidate_hdp1]
HibenchConf["hibench.hadoop.examples.jar"] = exactly_one_file(
examples_jars_candidate_list, "hibench.hadoop.examples.jar")
HibenchConfRef["hibench.hadoop.examples.jar"] = "Inferred by " + \
HibenchConf["hibench.hadoop.examples.jar"]
def probe_hadoop_examples_test_jars():
# probe hadoop examples test jars
if not HibenchConf.get("hibench.hadoop.examples.test.jar", ""):
examples_test_jars_candidate_apache0 = HibenchConf[
'hibench.hadoop.home'] + "/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient*-tests.jar"
examples_test_jars_candidate_cdh0 = HibenchConf[
'hibench.hadoop.home'] + "/share/hadoop/mapreduce2/hadoop-mapreduce-client-jobclient*-tests.jar"
examples_test_jars_candidate_cdh1 = HibenchConf[
'hibench.hadoop.home'] + "/../../jars/hadoop-mapreduce-client-jobclient*-tests.jar"
examples_test_jars_candidate_hdp0 = HibenchConf[
'hibench.hadoop.home'] + "/../hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient-tests.jar"
examples_test_jars_candidate_hdp1 = HibenchConf[
'hibench.hadoop.home'] + "/../hadoop-mapreduce/hadoop-mapreduce-client-jobclient-tests.jar"
examples_test_jars_candidate_list = [
examples_test_jars_candidate_apache0,
examples_test_jars_candidate_cdh0,
examples_test_jars_candidate_cdh1,
examples_test_jars_candidate_hdp0,
examples_test_jars_candidate_hdp1]
HibenchConf["hibench.hadoop.examples.test.jar"] = exactly_one_file(
examples_test_jars_candidate_list, "hibench.hadoop.examples.test.jar")
HibenchConfRef["hibench.hadoop.examples.test.jar"] = "Inferred by " + \
HibenchConf["hibench.hadoop.examples.test.jar"]
def probe_sleep_job_jar():
# set hibench.sleep.job.jar
if not HibenchConf.get('hibench.sleep.job.jar', ''):
log("probe sleep jar:", HibenchConf['hibench.hadoop.examples.test.jar'])
HibenchConf["hibench.sleep.job.jar"] = HibenchConf['hibench.hadoop.examples.test.jar']
HibenchConfRef[
"hibench.sleep.job.jar"] = "Refer to `hibench.hadoop.examples.test.jar` according to the evidence of `hibench.hadoop.release`"
def probe_hadoop_configure_dir():
# probe hadoop configuration files
if not HibenchConf.get("hibench.hadoop.configure.dir", ""):
# For Apache, HDP, and CDH release
HibenchConf["hibench.hadoop.configure.dir"] = os.path.join(
HibenchConf["hibench.hadoop.home"], "etc", "hadoop")
HibenchConfRef["hibench.hadoop.configure.dir"] = "Inferred by: `hibench.hadoop.home`"
def probe_mapper_reducer_names():
# set hadoop mapper/reducer property names
if not HibenchConf.get("hibench.hadoop.mapper.name", ""):
HibenchConf["hibench.hadoop.mapper.name"] = "mapreduce.job.maps"
HibenchConfRef["hibench.hadoop.mapper.name"] = "Use default mapper name"
if not HibenchConf.get("hibench.hadoop.reducer.name", ""):
HibenchConf["hibench.hadoop.reducer.name"] = "mapreduce.job.reduces"
HibenchConfRef["hibench.hadoop.reducer.name"] = "Use default reducer name"
def probe_spark_conf_value(conf_name, default_value):
spark_home = HibenchConf.get("hibench.spark.home", "")
assert spark_home, "`hibench.spark.home` undefined, please fix it and retry"
join = os.path.join
spark_env_file = join(spark_home, "conf/spark-env.sh")
value = default_value
file_content = read_file_content(spark_env_file)
for line in file_content:
if not line.strip().startswith(
"#") and conf_name in line:
if "\"" in line:
value = line.split("=")[1].split("\"")[1]
elif "\'" in line:
value = line.split("=")[1].split("\'")[1]
else:
value = line.split("=")[1]
value = value.strip()
return value
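# Example (added note): a spark-env.sh line such as
# SPARK_MASTER_WEBUI_PORT="8082" makes
# probe_spark_conf_value("SPARK_MASTER_WEBUI_PORT", "8080") return "8082";
# if the file or the line is absent the default "8080" is returned.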
def probe_spark_master_webui_port():
return probe_spark_conf_value("SPARK_MASTER_WEBUI_PORT", "8080")
def probe_spark_worker_webui_port():
return probe_spark_conf_value("SPARK_WORKER_WEBUI_PORT", "8081")
def probe_masters_slaves_by_Yarn():
yarn_executable = os.path.join(os.path.dirname(
HibenchConf['hibench.hadoop.executable']), "yarn")
cmd = "( " + yarn_executable + " node -list 2> /dev/null | grep RUNNING )"
try:
worker_hostnames = [line.split(":")[0] for line in shell(cmd).split("\n")]
HibenchConf['hibench.slaves.hostnames'] = " ".join(worker_hostnames)
HibenchConfRef['hibench.slaves.hostnames'] = "Probed by parsing results from: " + cmd
# parse yarn resource manager from hadoop conf
yarn_site_file = os.path.join(HibenchConf["hibench.hadoop.configure.dir"], "yarn-site.xml")
with open(yarn_site_file) as f:
file_content = f.read()
match_address = re.findall(
"\<property\>\s*\<name\>\s*yarn.resourcemanager.address[.\w\s]*\<\/name\>\s*\<value\>([a-zA-Z\-\._0-9]+)(:\d+)?\<\/value\>",
file_content)
match_hostname = re.findall(
"\<property\>\s*\<name\>\s*yarn.resourcemanager.hostname[.\w\s]*\<\/name\>\s*\<value\>([a-zA-Z\-\._0-9]+)(:\d+)?\<\/value\>",
file_content)
if match_address:
resourcemanager_hostname = match_address[0][0]
HibenchConf['hibench.masters.hostnames'] = resourcemanager_hostname
HibenchConfRef['hibench.masters.hostnames'] = "Parsed from " + yarn_site_file
elif match_hostname:
resourcemanager_hostname = match_hostname[0][0]
HibenchConf['hibench.masters.hostnames'] = resourcemanager_hostname
HibenchConfRef['hibench.masters.hostnames'] = "Parsed from " + yarn_site_file
else:
assert 0, "Unknown resourcemanager, please check `hibench.hadoop.configure.dir` and \"yarn-site.xml\" file"
except Exception as e:
assert 0, "Get workers from yarn-site.xml page failed, reason:%s\nplease set `hibench.masters.hostnames` and `hibench.slaves.hostnames` manually" % e
def probe_masters_slaves_hostnames():
# probe masters, slaves hostnames
# determine running mode according to spark master configuration
    # no pre-defined hostnames, let's probe
    if not (HibenchConf.get("hibench.masters.hostnames", "") and
            HibenchConf.get("hibench.slaves.hostnames", "")):
if not (HibenchConf.get("hibench.spark.master", "")):
probe_masters_slaves_by_Yarn()
else:
spark_master = HibenchConf['hibench.spark.master']
# local mode
if spark_master.startswith("local"):
HibenchConf['hibench.masters.hostnames'] = '' # no master
# localhost as slaves
HibenchConf['hibench.slaves.hostnames'] = 'localhost'
HibenchConfRef['hibench.masters.hostnames'] = HibenchConfRef[
'hibench.slaves.hostnames'] = "Probed by the evidence of 'hibench.spark.master=%s'" % spark_master
# spark standalone mode
elif spark_master.startswith("spark"):
HibenchConf['hibench.masters.hostnames'] = spark_master[8:].split(":")[0]
HibenchConfRef[
'hibench.masters.hostnames'] = "Probed by the evidence of 'hibench.spark.master=%s'" % spark_master
try:
log(spark_master, HibenchConf['hibench.masters.hostnames'])
master_port = probe_spark_master_webui_port()
worker_port = probe_spark_worker_webui_port()
# Make the assumption that the master is in internal network, and force
# not to use any proxies
with closing(urllib.urlopen('http://%s:%s' % (HibenchConf['hibench.masters.hostnames'], master_port), proxies={})) as page:
worker_hostnames = []
for x in page.readlines():
if worker_port in x and "worker" in x:
worker_hostnames.append(re.findall("http:\/\/([a-zA-Z\-\._0-9]+):%s" % worker_port, x)[0])
HibenchConf['hibench.slaves.hostnames'] = " ".join(worker_hostnames)
HibenchConfRef['hibench.slaves.hostnames'] = "Probed by parsing " + \
'http://%s:%s' % (HibenchConf['hibench.masters.hostnames'], master_port)
except Exception as e:
assert 0, "Get workers from spark master's web UI page failed, \nPlease check your configurations, network settings, proxy settings, or set `hibench.masters.hostnames` and `hibench.slaves.hostnames` manually, master_port: %s, slave_port:%s" % (
master_port, worker_port)
# yarn mode
elif spark_master.startswith("yarn"):
probe_masters_slaves_by_Yarn()
# reset hostnames according to gethostbyaddr
names = set(HibenchConf['hibench.masters.hostnames'].split() +
HibenchConf['hibench.slaves.hostnames'].split())
new_name_mapping = {}
for name in names:
try:
new_name_mapping[name] = socket.gethostbyaddr(name)[0]
except: # host name lookup failure?
new_name_mapping[name] = name
HibenchConf['hibench.masters.hostnames'] = repr(" ".join(
[new_name_mapping[x] for x in HibenchConf['hibench.masters.hostnames'].split()]))
HibenchConf['hibench.slaves.hostnames'] = repr(" ".join(
[new_name_mapping[x] for x in HibenchConf['hibench.slaves.hostnames'].split()]))
def probe_java_opts():
file_name = os.path.join(HibenchConf['hibench.hadoop.configure.dir'], 'mapred-site.xml')
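    # mapred-site.xml stores the JVM options as, for example:
    #   <property><name>mapreduce.map.java.opts</name><value>-Xmx1024m</value></property>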
cnt = 0
map_java_opts_line = ""
reduce_java_opts_line = ""
lines = read_file_content(file_name)
content = ""
for line in lines:
content = content + line
# Do the split for itself so as to deal with any weird xml style
content = content.split("<value>")
for line in content:
if "mapreduce.map.java.opts" in line and cnt + 1 < len(content):
map_java_opts_line = content[cnt + 1]
if "mapreduce.reduce.java.opts" in line and cnt + 1 < len(content):
reduce_java_opts_line = content[cnt + 1]
cnt += 1
    def add_quotation_marks(line):
        # wrap the value in quotes unless it is already quoted
        if not (line.startswith("'") or line.startswith("\"")):
            return repr(line)
        return line
if map_java_opts_line != "":
HibenchConf['hibench.dfsioe.map.java_opts'] = add_quotation_marks(
map_java_opts_line.split("<")[0].strip())
HibenchConfRef['hibench.dfsioe.map.java_opts'] = "Probed by configuration file:'%s'" % os.path.join(
HibenchConf['hibench.hadoop.configure.dir'], 'mapred-site.xml')
if reduce_java_opts_line != "":
HibenchConf['hibench.dfsioe.red.java_opts'] = add_quotation_marks(
reduce_java_opts_line.split("<")[0].strip())
HibenchConfRef['hibench.dfsioe.red.java_opts'] = "Probed by configuration file:'%s'" % os.path.join(
HibenchConf['hibench.hadoop.configure.dir'], 'mapred-site.xml')
def generate_optional_value():
# get some critical values from environment or make a guess
d = os.path.dirname
join = os.path.join
HibenchConf['hibench.home'] = d(d(d(os.path.abspath(__file__))))
del d
HibenchConfRef['hibench.home'] = "Inferred from relative path of dirname(%s)/../../" % __file__
probe_java_bin()
probe_hadoop_release()
probe_hadoop_examples_jars()
probe_hadoop_examples_test_jars()
probe_sleep_job_jar()
probe_hadoop_configure_dir()
probe_mapper_reducer_names()
probe_masters_slaves_hostnames()
probe_java_opts()
def export_config(workload_name, framework_name):
join = os.path.join
report_dir = HibenchConf['hibench.report.dir']
conf_dir = join(report_dir, workload_name, framework_name, 'conf')
conf_filename = join(conf_dir, "%s.conf" % workload_name)
spark_conf_dir = join(conf_dir, "sparkbench")
spark_prop_conf_filename = join(spark_conf_dir, "spark.conf")
sparkbench_prop_conf_filename = join(spark_conf_dir, "sparkbench.conf")
if not os.path.exists(spark_conf_dir):
os.makedirs(spark_conf_dir)
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
# generate configure for hibench
sources = defaultdict(list)
for env_name, prop_name in HiBenchEnvPropMappingMandatory.items() + HiBenchEnvPropMapping.items():
source = HibenchConfRef.get(prop_name, 'None')
sources[source].append('%s=%s' % (env_name, HibenchConf.get(prop_name, '')))
with open(conf_filename, 'w') as f:
for source in sorted(sources.keys()):
f.write("# Source: %s\n" % source)
f.write("\n".join(sorted(sources[source])))
f.write("\n\n")
f.write("#Source: add for internal usage\n")
f.write("SPARKBENCH_PROPERTIES_FILES=%s\n" % sparkbench_prop_conf_filename)
f.write("SPARK_PROP_CONF=%s\n" % spark_prop_conf_filename)
f.write("WORKLOAD_RESULT_FOLDER=%s\n" % join(conf_dir, ".."))
f.write("HIBENCH_WORKLOAD_CONF=%s\n" % conf_filename)
f.write("export HADOOP_EXECUTABLE\n")
f.write("export HADOOP_CONF_DIR\n")
# generate properties for spark & sparkbench
sources = defaultdict(list)
for prop_name, prop_value in HibenchConf.items():
source = HibenchConfRef.get(prop_name, 'None')
sources[source].append('%s\t%s' % (prop_name, prop_value))
# generate configure for sparkbench
with open(spark_prop_conf_filename, 'w') as f:
for source in sorted(sources.keys()):
items = [x for x in sources[source] if x.startswith("spark.")]
if items:
f.write("# Source: %s\n" % source)
f.write("\n".join(sorted(items)))
f.write("\n\n")
# generate configure for spark
with open(sparkbench_prop_conf_filename, 'w') as f:
for source in sorted(sources.keys()):
items = [x for x in sources[source] if x.startswith(
"sparkbench.") or x.startswith("hibench.")]
if items:
f.write("# Source: %s\n" % source)
f.write("\n".join(sorted(items)))
f.write("\n\n")
return conf_filename
if __name__ == "__main__":
if len(sys.argv) < 4:
raise Exception(
"Please supply <conf root path>, <workload root path>, <workload folder path> [<patch config lists, seperated by comma>")
conf_root, workload_configFile, workload_folder = sys.argv[1], sys.argv[2], sys.argv[3]
if len(sys.argv) > 4:
patching_config = sys.argv[4]
else:
patching_config = ''
load_config(conf_root, workload_configFile, workload_folder, patching_config)
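# Example invocation (a hypothetical sketch; actual paths depend on your HiBench layout):
#   python load_config.py <conf root path> <workload config file> <workload folder> [patch1.conf,patch2.conf]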
"""
epistar_lpd8806.py: Raspberry Pi library for the Epistar LPD8806 RGB Strand
Provides the ability to drive an LPD8806-based strand of RGB LEDs from the Raspberry Pi.
Colors are provided as RGB and converted internally to the strand's 7-bit values.
The leds are available here:
https://www.aliexpress.com/item/1M-5M-LPD8806-32leds-m-48leds-m-52leds-m-60leds-m-optional-Waterproof-or-Non-Waterproof/32428471835.html
Wiring:
Pi MOSI -> Strand DI
Pi SCLK -> Strand CI
Make sure to use an external power supply to power the strand.
"""
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.join(current_dir, "..")
import sys
sys.path.append(parent_dir)
import threading
import signal
from lib import logger
from lib import error
class Error(error.Generic):
"""Base class for ledstrip module exceptions"""
pass
class InputError(Error):
"""Range error"""
pass
class Strand(threading.Thread):
BLINK_STEPS = 30
def __init__(self, num_leds=32, spidev='/dev/spidev0.0', simulate_mode=False):
threading.Thread.__init__(self)
self.logger = logger.Logger('Strand')
self.terminate = False
self.lock = threading.Lock()
self.update_event = threading.Event()
if num_leds < 1:
raise InputError('num_leds must be greater than zero')
self.num_leds = num_leds
self.sleep_interval_s = 0.030
if simulate_mode:
self.sleep_interval_s = 3.000
self.simulate_mode = simulate_mode
self.spidev = spidev
self.spi = None
self.gamma = bytearray(256)
for i in range(256):
# Color calculations from http://learn.adafruit.com/light-painting-with-raspberry-pi
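            # The LPD8806 uses 7-bit colour and requires the MSB of every data byte
            # to be set, so values are gamma-mapped into the range 0x80-0xFF.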
self.gamma[i] = 0x80 | int(pow(float(i) / 255.0, 2.5) * 127.0 + 0.5)
self.led_colour = [bytearray(3) for x in range(self.num_leds)]
self.buffer = [[0.0, 0.0, 0.0] for x in range(self.num_leds)]
self.blink = [False for x in range(self.num_leds)]
self.blink_step = [[0.0, 0.0, 0.0] for x in range(self.num_leds)]
self.blink_direction = [True for x in range(self.num_leds)]
if not self.simulate_mode:
self.spi = open(self.spidev, 'wb')
self.fill(0, 0, 0)
signal.signal(signal.SIGTERM, self._handle_signals)
signal.signal(signal.SIGINT, self._handle_signals)
def _handle_signals(self, signum, stack):
if signum == signal.SIGTERM or signum == signal.SIGINT:
self.stop()
def alloff(self):
"""
Turns off all LEDs.
"""
self.fill(0, 0, 0)
def __verify_start_end_range(self, start_index, end_index):
if start_index < 0 or \
start_index >= self.num_leds :
raise InputError('start_index out of range')
if start_index >= end_index:
raise InputError('start_index must be less than end_index')
if end_index < 0 or \
end_index > self.num_leds :
raise InputError('end_index out of range')
def fill(self, r, g, b, blink=False, start_index=None, end_index=None):
"""
Fills a range of LEDs with the specific colour.
"""
if start_index is None:
start_index = 0
if end_index is None or end_index == 0:
end_index = self.num_leds
self.__verify_start_end_range(start_index, end_index)
if r < 0 or r >= 256 or \
g < 0 or g >= 256 or \
b < 0 or b >= 256 :
raise InputError('rgb out of range')
self.lock.acquire()
try:
changed = False
for i in range(start_index, end_index):
if self.__setled(i, r, g, b, blink):
changed = True
if changed:
self.update_event.set()
finally:
self.lock.release()
def setblink(self, pixel_index, blink):
"""
Set a single LED blink state
"""
if pixel_index < 0 or \
pixel_index >= self.num_leds :
raise InputError('pixel_index out of range')
self.lock.acquire()
try:
# no blinking if LED is off
if self.led_colour[pixel_index][0] == 0 and \
self.led_colour[pixel_index][1] == 0 and \
self.led_colour[pixel_index][2] == 0:
blink = False
if self.blink[pixel_index] != blink:
self.blink[pixel_index] = blink
self.update_event.set()
finally:
self.lock.release()
def setblinkrange(self, blink, start_index=None, end_index=None):
"""
Set a blink state on a range of LEDs
"""
if start_index is None:
start_index = 0
if end_index is None or end_index == 0:
end_index = self.num_leds
self.__verify_start_end_range(start_index, end_index)
self.lock.acquire()
try:
changed = False
for i in range(start_index, end_index):
tmp_blink = blink
# no blinking if LED is off
if self.led_colour[i][0] == 0 and \
self.led_colour[i][1] == 0 and \
self.led_colour[i][2] == 0:
tmp_blink = False
if self.blink[i] != tmp_blink:
self.blink[i] = tmp_blink
changed = True
if changed:
self.update_event.set()
finally:
self.lock.release()
def setled(self, pixel_index, r, g, b, blink=False):
"""
Set a single LED a specific colour
"""
if pixel_index < 0 or \
pixel_index >= self.num_leds :
raise InputError('pixel_index out of range')
if r < 0 or r >= 256 or \
g < 0 or g >= 256 or \
b < 0 or b >= 256 :
raise InputError('rgb out of range')
self.lock.acquire()
try:
changed = self.__setled(pixel_index, r, g, b, blink)
if changed:
self.update_event.set()
finally:
self.lock.release()
def __setled(self, pixel_index, r, g, b, blink):
changed = False
if self.led_colour[pixel_index][0] != b:
self.led_colour[pixel_index][0] = b
self.buffer[pixel_index][0] = b
self.blink_step[pixel_index][0] = float(b)/Strand.BLINK_STEPS
self.blink_direction[pixel_index] = False
changed = True
if self.led_colour[pixel_index][1] != r:
self.led_colour[pixel_index][1] = r
self.buffer[pixel_index][1] = r
self.blink_step[pixel_index][1] = float(r)/Strand.BLINK_STEPS
self.blink_direction[pixel_index] = False
changed = True
if self.led_colour[pixel_index][2] != g:
self.led_colour[pixel_index][2] = g
self.buffer[pixel_index][2] = g
self.blink_step[pixel_index][2] = float(g)/Strand.BLINK_STEPS
self.blink_direction[pixel_index] = False
changed = True
if self.blink[pixel_index] != blink:
self.blink[pixel_index] = blink
changed = True
return changed
def __update(self):
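        # Non-blinking LEDs are held at their configured colour; blinking LEDs are
        # faded down and back up by blink_step per tick, reversing direction at the
        # configured colour and near zero.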
self.lock.acquire()
for i in range(self.num_leds):
if not self.blink[i]:
for j in range(3):
if 0 <= self.led_colour[i][j] < 256:
self.buffer[i][j] = self.led_colour[i][j]
else:
if self.blink_direction[i]:
for j in range(3):
self.buffer[i][j] = float(self.buffer[i][j]) + self.blink_step[i][j]
if self.buffer[i][j] > self.led_colour[i][j]:
self.buffer[i][j] = self.led_colour[i][j]
self.blink_direction[i] = False
else:
for j in range(3):
self.buffer[i][j] = float(self.buffer[i][j]) - self.blink_step[i][j]
if self.buffer[i][j] < self.blink_step[i][j]:
self.blink_direction[i] = True
self.lock.release()
tmp = [bytearray(3) for x in range(self.num_leds)]
for i in range(self.num_leds):
for j in range(3):
tmp[i][j] = self.gamma[int(self.buffer[i][j])]
if not self.simulate_mode:
for x in range(self.num_leds):
self.spi.write(tmp[x])
self.spi.write(bytearray(b'\x00'))
self.spi.flush()
else:
print '%s\n' % str(tmp)
def run(self):
if self.terminate:
return
try:
self.__update()
self.logger.log('LED driver thread: STARTED')
while not self.terminate:
timeout = None
if True in self.blink:
timeout = self.sleep_interval_s
self.update_event.wait(timeout)
self.update_event.clear()
self.__update()
self.alloff()
self.__update()
except Exception, e:
logger.print_trace(e)
self.logger.log('LED driver thread: STOPPED')
def stop(self):
self.terminate = True
self.alloff()
self.update_event.set()
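# Minimal usage sketch (illustrative only, not part of the library): drives a short
# strand in simulate_mode so no SPI hardware is required; real hardware would pass
# simulate_mode=False and use the default /dev/spidev0.0 device.
if __name__ == '__main__':
    import time
    strand = Strand(num_leds=8, simulate_mode=True)
    strand.start()                                # start the LED driver thread
    try:
        strand.fill(255, 0, 0)                    # set all LEDs to red
        strand.setled(0, 0, 255, 0, blink=True)   # first LED green, blinking
        time.sleep(5)
    finally:
        strand.stop()                             # turns LEDs off and ends the thread
        strand.join()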