code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import namedtuple
import mock
import pytest
from kazoo.exceptions import NoNodeError
from kafka_utils.util.config import ClusterConfig
from kafka_utils.util.serialization import dump_json
from kafka_utils.util.zookeeper import ZK
MockGetTopics = namedtuple('MockGetTopics', ['ctime'])
@mock.patch(
'kafka_utils.util.zookeeper.KazooClient',
autospec=True
)
class TestZK(object):
cluster_config = ClusterConfig(
type='mytype',
name='some_cluster',
broker_list='some_list',
zookeeper='some_ip'
)
def test_create(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.create(
'/kafka/consumers/some_group/offsets'
)
zk.create(
'/kafka/consumers/some_group/offsets',
value='some_val',
acl=None,
ephemeral=True,
sequence=True,
makepath=True
)
mock_obj = mock.Mock()
zk.create(
'/kafka/consumers/some_group/offsets',
value='some_val',
acl=mock_obj,
)
call_list = [
mock.call(
'/kafka/consumers/some_group/offsets',
'', None, False, False, False
),
mock.call(
'/kafka/consumers/some_group/offsets',
'some_val', None, True, True, True
),
mock.call(
'/kafka/consumers/some_group/offsets',
'some_val', mock_obj, False, False, False
),
]
assert mock_client.return_value.create.call_args_list == call_list
def test_set(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.set(
'config/topics/some_topic',
'some_val'
)
zk.set(
'brokers/topics/some_topic',
'{"name": "some_topic", "more": "properties"}'
)
call_list = [
mock.call(
'config/topics/some_topic',
'some_val'
),
mock.call(
'brokers/topics/some_topic',
'{"name": "some_topic", "more": "properties"}'
)
]
assert mock_client.return_value.set.call_args_list == call_list
def test_delete(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.delete(
'/kafka/consumers/some_group/offsets',
)
zk.delete(
'/kafka/consumers/some_group/offsets',
recursive=True
)
call_list = [
mock.call(
'/kafka/consumers/some_group/offsets',
recursive=False
),
mock.call(
'/kafka/consumers/some_group/offsets',
recursive=True
),
]
assert mock_client.return_value.delete.call_args_list == call_list
def test_delete_topic(self, _):
with mock.patch.object(
ZK,
'delete',
autospec=True
) as mock_delete:
with ZK(self.cluster_config) as zk:
zk.delete_topic(
'some_group',
'some_topic',
)
mock_delete.assert_called_once_with(
zk,
'/consumers/some_group/offsets/some_topic',
True,
)
def test_get_my_subscribed_partitions(self, _):
with mock.patch.object(
ZK,
'get_children',
autospec=True,
) as mock_children:
with ZK(self.cluster_config) as zk:
zk.get_my_subscribed_partitions(
'some_group',
'some_topic',
)
mock_children.assert_called_once_with(
zk,
'/consumers/some_group/offsets/some_topic',
)
def test_get_topic_config(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.zk.get = mock.Mock(
return_value=(
b'{"version": 1, "config": {"cleanup.policy": "compact"}}',
"Random node info that doesn't matter"
)
)
actual = zk.get_topic_config("some_topic")
expected = {"version": 1, "config": {"cleanup.policy": "compact"}}
assert actual == expected
def test_get_topic_config_8(self, mock_client):
"""
Test getting configuration for topics created in Kafa prior to 0.9.0.
"""
with ZK(self.cluster_config) as zk:
zk.zk.get = mock.Mock(side_effect=NoNodeError())
zk.get_topics = mock.Mock(return_value={"some_topic": {}})
actual = zk.get_topic_config("some_topic")
expected = {"config": {}}
assert actual == expected
def test_get_nonexistent_topic_config(self, mock_client):
"""
Test getting configuration for topics that don't exist.
"""
with ZK(self.cluster_config) as zk:
zk.zk.get = mock.Mock(side_effect=NoNodeError())
zk.get_topics = mock.Mock(return_value={})
with pytest.raises(NoNodeError):
zk.get_topic_config("some_topic")
def test_set_topic_config_kafka_10(self, mock_client):
with mock.patch.object(
ZK,
'set',
autospec=True
) as mock_set:
with ZK(self.cluster_config) as zk:
config = {"version": 1, "config": {"cleanup.policy": "compact"}}
config_change = {"entity_path": "topics/some_topic", "version": 2}
zk.set_topic_config(
"some_topic",
config,
)
serialized_config = dump_json(config)
serialized_config_change = dump_json(config_change)
mock_set.assert_called_once_with(
zk,
'/config/topics/some_topic',
serialized_config,
)
expected_create_call = mock.call(
'/config/changes/config_change_',
serialized_config_change,
None,
False,
True,
False
)
assert mock_client.return_value.create.call_args_list == [expected_create_call]
def test_set_topic_config_kafka_9(self, mock_client):
with mock.patch.object(
ZK,
'set',
autospec=True
) as mock_set:
with ZK(self.cluster_config) as zk:
config = {"version": 1, "config": {"cleanup.policy": "compact"}}
config_change = {"version": 1, "entity_type": "topics", "entity_name": "some_topic"}
zk.set_topic_config(
"some_topic",
config,
(0, 9, 2)
)
serialized_config = dump_json(config)
serialized_config_change = dump_json(config_change)
mock_set.assert_called_once_with(
zk,
'/config/topics/some_topic',
serialized_config,
)
expected_create_call = mock.call(
'/config/changes/config_change_',
serialized_config_change,
None,
False,
True,
False
)
assert mock_client.return_value.create.call_args_list == [expected_create_call]
def test_get_broker_config(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.zk.get = mock.Mock(
return_value=(
b'{"version": 1, "config": {"leader.replication.throttled.rate": "42"}}',
"Random node info that doesn't matter"
)
)
actual = zk.get_broker_config(0)
expected = {"version": 1, "config": {"leader.replication.throttled.rate": "42"}}
assert actual == expected
def test_set_broker_config_kafka_10(self, mock_client):
with mock.patch.object(
ZK,
'set',
autospec=True
) as mock_set:
with ZK(self.cluster_config) as zk:
config = {"version": 1, "config": {"leader.replication.throttled.rate": "42"}}
config_change = {"entity_path": "brokers/0", "version": 2}
zk.set_broker_config(0, config)
serialized_config = dump_json(config)
serialized_config_change = dump_json(config_change)
mock_set.assert_called_once_with(
zk,
'/config/brokers/0',
serialized_config,
)
expected_create_call = mock.call(
'/config/changes/config_change_',
serialized_config_change,
None,
False,
True,
False
)
assert mock_client.return_value.create.call_args_list == [expected_create_call]
def test_get_topics(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.zk.get = mock.Mock(
return_value=(
(
b'{"version": "1", "partitions": {"0": [1, 0]}}',
MockGetTopics(31000),
)
)
)
zk._fetch_partition_state = mock.Mock(
return_value=(
(
b'{"version": "2"}',
MockGetTopics(32000),
)
)
)
actual_with_fetch_state = zk.get_topics("some_topic")
expected_with_fetch_state = {
'some_topic': {
'ctime': 31.0,
'partitions': {
'0': {
'replicas': [1, 0],
'ctime': 32.0,
'version': '2',
},
},
'version': '1',
},
}
assert actual_with_fetch_state == expected_with_fetch_state
zk._fetch_partition_info = mock.Mock(
return_value=MockGetTopics(33000)
)
actual_without_fetch_state = zk.get_topics("some_topic", fetch_partition_state=False)
expected_without_fetch_state = {
'some_topic': {
'ctime': 31.0,
'partitions': {
'0': {
'replicas': [1, 0],
'ctime': 33.0,
},
},
'version': '1',
},
}
assert actual_without_fetch_state == expected_without_fetch_state
def test_get_topics_empty_cluster(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(side_effect=NoNodeError())
actual_with_no_node_error = zk.get_topics()
expected_with_no_node_error = {}
zk.get_children.assert_called_with("/brokers/topics")
assert actual_with_no_node_error == expected_with_no_node_error
def test_get_brokers_names_only(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(
return_value=[1, 2, 3],
)
expected = {1: None, 2: None, 3: None}
actual = zk.get_brokers(names_only=True)
zk.get_children.assert_called_with("/brokers/ids")
assert actual == expected
def test_get_brokers_with_metadata(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(
return_value=[1, 2, 3],
)
zk.get_broker_metadata = mock.Mock(
return_value='broker',
)
expected = {1: 'broker', 2: 'broker', 3: 'broker'}
actual = zk.get_brokers()
zk.get_children.assert_called_with("/brokers/ids")
calls = zk.get_broker_metadata.mock_calls
zk.get_broker_metadata.assert_has_calls(calls)
assert actual == expected
def test_get_brokers_empty_cluster(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(side_effect=NoNodeError())
actual_with_no_node_error = zk.get_brokers()
expected_with_no_node_error = {}
zk.get_children.assert_called_with("/brokers/ids")
assert actual_with_no_node_error == expected_with_no_node_error
def test_get_brokers_with_metadata_for_ssl(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(
return_value=[1],
)
zk.get = mock.Mock(
return_value=(b'{"endpoints":["SSL://broker:9093"],"host":null}', None)
)
expected = {1: {'host': 'broker'}}
actual = zk.get_brokers()
assert actual[1]['host'] == expected[1]['host']
zk.get = mock.Mock(
return_value=(b'{"endpoints":["INTERNAL://broker:9093","EXTERNAL://broker:9093"],"host":null}', None)
)
expected = {1: {'host': 'broker'}}
actual = zk.get_brokers()
assert actual[1]['host'] == expected[1]['host']
def test_get_brokers_with_metadata_for_sasl(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(
return_value=[1],
)
zk.get = mock.Mock(
return_value=(b'{"endpoints":["PLAINTEXTSASL://broker:9093"],"host":null}', None)
)
expected = {1: {'host': 'broker'}}
actual = zk.get_brokers()
assert actual[1]['host'] == expected[1]['host']
def test_get_brokers_with_metadata_for_plaintext(self, mock_client):
with ZK(self.cluster_config) as zk:
zk.get_children = mock.Mock(
return_value=[1],
)
zk.get = mock.Mock(
return_value=(b'{"endpoints":[],"host":"broker"}', None)
)
expected = {1: {'host': 'broker'}}
actual = zk.get_brokers()
assert actual[1]['host'] == expected[1]['host']
| Yelp/kafka-utils | tests/util/zookeeper_test.py | Python | apache-2.0 | 15,746 |
#!/usr/bin/env python
import csv, sys
mapping = {}
totalTruth, totalTesting, hit, miss, errors = (0, 0, 0, 0, 0)
with open(sys.argv[1], 'rb') as groundtruth:
reader = csv.reader(groundtruth)
for row in reader:
totalTruth += 1
mapping[(row[1], row[2])] = row[0]
with open(sys.argv[2], 'rb') as testing:
reader = csv.reader(testing)
for row in reader:
totalTesting += 1
try:
if (mapping[(row[1], row[2])] == row[0]):
hit += 1
else:
miss += 1
except KeyError:
errors += 1
print "Total size: ", totalTruth, " and testing size: ", totalTesting
print "Correct assignments: ", hit, " and failed assigments: ", miss
print "Errors: ", errors
print "Accuracy: ", float(hit) / float(totalTruth)
| whoww/peel-flink-kmeans | VarianceBenchmarkResults/AccuracyMeasure.py | Python | apache-2.0 | 815 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import Table
def map(engine, models):
meta = MetaData()
meta.bind = engine
if mapping_exists(models['instance']):
return
orm.mapper(models['instance'], Table('instances', meta, autoload=True))
orm.mapper(models['root_enabled_history'],
Table('root_enabled_history', meta, autoload=True))
orm.mapper(models['datastore'],
Table('datastores', meta, autoload=True))
orm.mapper(models['datastore_version'],
Table('datastore_versions', meta, autoload=True))
orm.mapper(models['capabilities'],
Table('capabilities', meta, autoload=True))
orm.mapper(models['capability_overrides'],
Table('capability_overrides', meta, autoload=True))
orm.mapper(models['service_statuses'],
Table('service_statuses', meta, autoload=True))
orm.mapper(models['dns_records'],
Table('dns_records', meta, autoload=True))
orm.mapper(models['agent_heartbeats'],
Table('agent_heartbeats', meta, autoload=True))
orm.mapper(models['quotas'],
Table('quotas', meta, autoload=True))
orm.mapper(models['quota_usages'],
Table('quota_usages', meta, autoload=True))
orm.mapper(models['reservations'],
Table('reservations', meta, autoload=True))
orm.mapper(models['backups'],
Table('backups', meta, autoload=True))
orm.mapper(models['security_group'],
Table('security_groups', meta, autoload=True))
orm.mapper(models['security_group_rule'],
Table('security_group_rules', meta, autoload=True))
orm.mapper(models['security_group_instance_association'],
Table('security_group_instance_associations', meta,
autoload=True))
orm.mapper(models['configurations'],
Table('configurations', meta, autoload=True))
orm.mapper(models['configuration_parameters'],
Table('configuration_parameters', meta, autoload=True))
orm.mapper(models['conductor_lastseen'],
Table('conductor_lastseen', meta, autoload=True))
orm.mapper(models['clusters'],
Table('clusters', meta, autoload=True))
orm.mapper(models['datastore_configuration_parameters'],
Table('datastore_configuration_parameters', meta,
autoload=True))
def mapping_exists(model):
try:
orm.class_mapper(model)
return True
except orm_exc.UnmappedClassError:
return False
| cp16net/trove | trove/db/sqlalchemy/mappers.py | Python | apache-2.0 | 3,298 |
from molotov.api import pick_scenario, scenario, get_scenarios, setup
from molotov.tests.support import TestLoop, async_test
class TestUtil(TestLoop):
def test_pick_scenario(self):
@scenario(weight=10)
async def _one(self):
pass
@scenario(weight=90)
async def _two(self):
pass
picked = [pick_scenario()["name"] for i in range(100)]
ones = len([f for f in picked if f == "_one"])
self.assertTrue(ones < 20)
@async_test
async def test_can_call(self, loop, console, results):
@setup()
async def _setup(self):
pass
@scenario(weight=10)
async def _one(self):
pass
# can still be called
await _one(self)
# same for fixtures
await _setup(self)
def test_default_weight(self):
@scenario()
async def _default_weight(self):
pass
self.assertEqual(len(get_scenarios()), 1)
self.assertEqual(get_scenarios()[0]["weight"], 1)
def test_no_scenario(self):
@scenario(weight=0)
async def _one(self):
pass
@scenario(weight=0)
async def _two(self):
pass
self.assertEqual(get_scenarios(), [])
def test_scenario_not_coroutine(self):
try:
@scenario(weight=1)
def _one(self):
pass
except TypeError:
return
raise AssertionError("Should raise")
def test_setup_not_coroutine(self):
try:
@setup()
def _setup(self):
pass
@scenario(weight=90)
async def _two(self):
pass
except TypeError:
return
raise AssertionError("Should raise")
def test_two_fixtures(self):
try:
@setup()
async def _setup(self):
pass
@setup()
async def _setup2(self):
pass
@scenario(weight=90)
async def _two(self):
pass
except ValueError:
return
raise AssertionError("Should raise")
| loads/molotov | molotov/tests/test_api.py | Python | apache-2.0 | 2,203 |
"""
IOStore class originated here
https://github.com/BD2KGenomics/hgvm-graph-bakeoff-evaluations/blob/master/scripts/toillib.py
and was then here:
https://github.com/cmarkello/toil-lib/blob/master/src/toil_lib/toillib.py
In a perfect world, this would be deprecated and replaced with Toil's stores.
Actually did this here:
https://github.com/glennhickey/toil-vg/tree/issues/110-fix-iostore
But couldn't get Toil's multipart S3 uploader working on large files. Also,
the toil jobStore interface is a little less clean for our use.
So for now keep as part of toil-vg where it works. Could also consider merging
into the upstream toil-lib
https://github.com/BD2KGenomics/toil-lib
"""
import sys, os, os.path, json, collections, logging, logging.handlers
import struct, socket, threading, tarfile, shutil
import tempfile
import functools
import random
import time
import dateutil
import traceback
import stat
from toil.realtimeLogger import RealtimeLogger
import datetime
# Need stuff for Amazon s3
try:
import boto3
import botocore
have_s3 = True
except ImportError:
have_s3 = False
pass
# We need some stuff in order to have Azure
try:
import azure
# Make sure to get the 0.11 BlobService, in case the new azure storage
# module is also installed.
from azure.storage.blob import BlobService
import toil.jobStores.azureJobStore
have_azure = True
except ImportError:
have_azure = False
pass
def robust_makedirs(directory):
"""
Make a directory when other nodes may be trying to do the same on a shared
filesystem.
"""
if not os.path.exists(directory):
try:
# Make it if it doesn't exist
os.makedirs(directory)
except OSError:
# If you can't make it, maybe someone else did?
pass
# Make sure it exists and is a directory
assert(os.path.exists(directory) and os.path.isdir(directory))
def write_global_directory(file_store, path, cleanup=False, tee=None, compress=True):
"""
Write the given directory into the file store, and return an ID that can be
used to retrieve it. Writes the files in the directory and subdirectories
into a tar file in the file store.
Does not preserve the name or permissions of the given directory (only of
its contents).
If cleanup is true, directory will be deleted from the file store when this
job and its follow-ons finish.
If tee is passed, a tar.gz of the directory contents will be written to that
filename. The file thus created must not be modified after this function is
called.
"""
write_stream_mode = "w"
if compress:
write_stream_mode = "w|gz"
if tee is not None:
with open(tee, "w") as file_handle:
# We have a stream, so start taring into it
with tarfile.open(fileobj=file_handle, mode=write_stream_mode) as tar:
# Open it for streaming-only write (no seeking)
# We can't just add the root directory, since then we wouldn't be
# able to extract it later with an arbitrary name.
for file_name in os.listdir(path):
# Add each file in the directory to the tar, with a relative
# path
tar.add(os.path.join(path, file_name), arcname=file_name)
# Save the file on disk to the file store.
return file_store.writeGlobalFile(tee)
else:
with file_store.writeGlobalFileStream(cleanup=cleanup) as (file_handle,
file_id):
# We have a stream, so start taring into it
# TODO: don't duplicate this code.
with tarfile.open(fileobj=file_handle, mode=write_stream_mode) as tar:
# Open it for streaming-only write (no seeking)
# We can't just add the root directory, since then we wouldn't be
# able to extract it later with an arbitrary name.
for file_name in os.listdir(path):
# Add each file in the directory to the tar, with a relative
# path
tar.add(os.path.join(path, file_name), arcname=file_name)
# Spit back the ID to use to retrieve it
return file_id
def read_global_directory(file_store, directory_id, path):
"""
Reads a directory with the given tar file id from the global file store and
recreates it at the given path.
The given path, if it exists, must be a directory.
Do not use to extract untrusted directories, since they could sneakily plant
files anywhere on the filesystem.
"""
# Make the path
robust_makedirs(path)
with file_store.readGlobalFileStream(directory_id) as file_handle:
# We need to pull files out of this tar stream
with tarfile.open(fileobj=file_handle, mode="r|*") as tar:
# Open it for streaming-only read (no seeking)
# We need to extract the whole thing into that new directory
tar.extractall(path)
class IOStore(object):
"""
A class that lets you get your input files and save your output files
to/from a local filesystem, Amazon S3, or Microsoft Azure storage
transparently.
This is the abstract base class; other classes inherit from this and fill in
the methods.
"""
def __init__(self):
"""
Make a new IOStore
"""
raise NotImplementedError()
def read_input_file(self, input_path, local_path):
"""
Read an input file from wherever the input comes from and send it to the
given path.
If the file at local_path already exists, it is overwritten.
If the file at local_path already exists and is a directory, behavior is
undefined.
"""
raise NotImplementedError()
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Yields each of the subdirectories and files in the given input path.
If recursive is false, yields files and directories in the given
directory. If recursive is true, yields all files contained within the
current directory, recursively, but does not yield folders.
If with_times is True, yields (name, modification time) pairs instead of
just names, with modification times represented as datetime objects in
the GMT timezone. Modification times may be None on objects that do not
support them.
Gives relative file/directory names.
"""
raise NotImplementedError()
def write_output_file(self, local_path, output_path):
"""
Save the given local file to the given output path. No output directory
needs to exist already.
If the output path already exists, it is overwritten.
If the output path already exists and is a directory, behavior is
undefined.
"""
raise NotImplementedError()
def exists(self, path):
"""
Returns true if the given input or output file exists in the store
already.
"""
raise NotImplementedError()
def get_mtime(self, path):
"""
Returns the modification time of the given gile if it exists, or None
otherwise.
"""
raise NotImplementedError()
def get_size(self, path):
"""
Returns the size in bytes of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
@staticmethod
def absolute(store_string):
"""
Convert a relative path IOStore string to an absolute path one. Leaves
strings that aren't FileIOStore specifications alone.
Since new Toil versions change the working directory of SingleMachine
batch system jobs, we need to have absolute paths passed into jobs.
Recommended to be used as an argparse type, so that strings can be
directly be passed to IOStore.get on the nodes.
"""
if store_string == "":
return ""
if store_string[0] == ".":
# It's a relative ./ path
return os.path.abspath(store_string)
if store_string.startswith("file:"):
# It's a file:-prefixed thing that may be a relative path
# Normalize the part after "file:" (which is 5 characters)
return "file:" + os.path.abspath(store_string[5:])
return store_string
@staticmethod
def get(store_string):
"""
Get a concrete IOStore created from the given connection string.
Valid formats are just like for a Toil JobStore, except with container
names being specified on Azure.
Formats:
/absolute/filesystem/path
./relative/filesystem/path
file:filesystem/path
aws:region:bucket (TODO)
aws:region:bucket/path/prefix (TODO)
azure:account:container (instead of a container prefix) (gets keys like
Toil)
azure:account:container/path/prefix (trailing slash added automatically)
"""
# Code adapted from toil's common.py loadJobStore()
if store_string[0] in "/.":
# Prepend file: tot he path
store_string = "file:" + store_string
try:
# Break off the first colon-separated piece.
store_type, store_arguments = store_string.split(":", 1)
except ValueError:
# They probably forgot the . or /
raise RuntimeError("Incorrect IO store specification {}. "
"Local paths must start with . or /".format(store_string))
if store_type == "file":
return FileIOStore(store_arguments)
elif store_type == "aws":
# Break out the AWS arguments
region, bucket_name = store_arguments.split(":", 1)
if "/" in bucket_name:
# Split the bucket from the path
bucket_name, path_prefix = bucket_name.split("/", 1)
else:
# No path prefix
path_prefix = ""
return S3IOStore(region, bucket_name, path_prefix)
elif store_type == "azure":
# Break out the Azure arguments.
account, container = store_arguments.split(":", 1)
if "/" in container:
# Split the container from the path
container, path_prefix = container.split("/", 1)
else:
# No path prefix
path_prefix = ""
return AzureIOStore(account, container, path_prefix)
else:
raise RuntimeError("Unknown IOStore implementation {}".format(
store_type))
class FileIOStore(IOStore):
"""
A class that lets you get input from and send output to filesystem files.
"""
def __init__(self, path_prefix=""):
"""
Make a new FileIOStore that just treats everything as local paths,
relative to the given prefix.
"""
self.path_prefix = path_prefix
def read_input_file(self, input_path, local_path):
"""
Get input from the filesystem.
"""
RealtimeLogger.debug("Loading {} from FileIOStore in {} to {}".format(
input_path, self.path_prefix, local_path))
if os.path.exists(local_path):
# Try deleting the existing item if it already exists
try:
os.unlink(local_path)
except:
# Don't fail here, fail complaining about the assertion, which
# will be more informative.
pass
# Make sure the path is clear for copying
assert(not os.path.exists(local_path))
# Where is the file actually?
real_path = os.path.abspath(os.path.join(self.path_prefix, input_path))
if not os.path.exists(real_path):
RealtimeLogger.error(
"Can't find {} from FileIOStore in {}!".format(input_path,
self.path_prefix))
raise RuntimeError("File {} missing!".format(real_path))
# Make a temporary file
temp_handle, temp_path = tempfile.mkstemp(dir=os.path.dirname(local_path))
os.close(temp_handle)
# Copy to the temp file
shutil.copy2(real_path, temp_path)
# Rename the temp file to the right place, atomically
RealtimeLogger.info("rename {} -> {}".format(temp_path, local_path))
os.rename(temp_path, local_path)
# Look at the file stats
file_stats = os.stat(real_path)
if (file_stats.st_uid == os.getuid() and
file_stats.st_mode & stat.S_IWUSR):
# We own this file and can write to it. We don't want the user
# script messing it up through the symlink.
try:
# Clear the user write bit, so the user can't accidentally
# clobber the file in the actual store through the symlink.
os.chmod(real_path, file_stats.st_mode ^ stat.S_IWUSR)
except OSError:
# If something goes wrong here (like us not having permission to
# change permissions), ignore it.
pass
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Loop over directories on the filesystem.
"""
RealtimeLogger.info("Enumerating {} from "
"FileIOStore in {}".format(input_path, self.path_prefix))
if not os.path.exists(os.path.join(self.path_prefix, input_path)):
# Nothing to list over
return
if not os.path.isdir(os.path.join(self.path_prefix, input_path)):
# Can't list a file, only a directory.
return
for item in os.listdir(os.path.join(self.path_prefix, input_path)):
if(recursive and os.path.isdir(os.path.join(self.path_prefix,
input_path, item))):
# We're recursing and this is a directory.
# Recurse on this.
for subitem in self.list_input_directory(
os.path.join(input_path, item), recursive):
# Make relative paths include this directory name and yield
# them
name_to_yield = os.path.join(item, subitem)
if with_times:
# What is the mtime in seconds since epoch?
mtime_epoch_seconds = os.path.getmtime(os.path.join(
input_path, item, subitem))
# Convert it to datetime
yield name_to_yield, mtime_epoch_seconds
else:
yield name_to_yield
else:
# This isn't a directory or we aren't being recursive
# Just report this individual item.
if with_times:
# What is the mtime in seconds since epoch?
mtime_epoch_seconds = os.path.getmtime(os.path.join(
input_path, item))
yield item, mtime_epoch_seconds
else:
yield item
def write_output_file(self, local_path, output_path):
"""
Write output to the filesystem
"""
RealtimeLogger.debug("Saving {} to FileIOStore in {}".format(
output_path, self.path_prefix))
# What's the real output path to write to?
real_output_path = os.path.join(self.path_prefix, output_path)
# What directory should this go in?
parent_dir = os.path.split(real_output_path)[0]
if parent_dir != "":
# Make sure the directory it goes in exists.
robust_makedirs(parent_dir)
# Make a temporary file
temp_handle, temp_path = tempfile.mkstemp(dir=self.path_prefix)
os.close(temp_handle)
# Copy to the temp file
shutil.copy2(local_path, temp_path)
if os.path.exists(real_output_path):
# At least try to get existing files out of the way first.
try:
os.unlink(real_output_path)
except:
pass
# Rename the temp file to the right place, atomically
os.rename(temp_path, real_output_path)
def exists(self, path):
"""
Returns true if the given input or output file exists in the file system
already.
"""
return os.path.exists(os.path.join(self.path_prefix, path))
def get_mtime(self, path):
"""
Returns the modification time of the given file if it exists, or None
otherwise.
"""
if not self.exists(path):
return None
# What is the mtime in seconds since epoch?
mtime_epoch_seconds = os.path.getmtime(os.path.join(self.path_prefix,
path))
# Convert it to datetime
mtime_datetime = datetime.datetime.utcfromtimestamp(
mtime_epoch_seconds).replace(tzinfo=dateutil.tz.tzutc())
# Return the modification time, timezoned, in UTC
return mtime_datetime
def get_size(self, path):
"""
Returns the size in bytes of the given file if it exists, or None
otherwise.
"""
if not self.exists(path):
return None
# Return the size in bytes of the backing file
return os.stat(os.path.join(self.path_prefix, path)).st_size
class BackoffError(RuntimeError):
"""
Represents an error from running out of retries during exponential back-off.
"""
def backoff_times(retries, base_delay):
"""
A generator that yields times for random exponential back-off. You have to
do the exception handling and sleeping yourself. Stops when the retries run
out.
"""
# Don't wait at all before the first try
yield 0
# What retry are we on?
try_number = 1
# Make a delay that increases
delay = float(base_delay) * 2
while try_number <= retries:
# Wait a random amount between 0 and 2^try_number * base_delay
yield random.uniform(base_delay, delay)
delay *= 2
try_number += 1
# If we get here, we're stopping iteration without succeeding. The caller
# will probably raise an error.
def backoff(original_function, retries=6, base_delay=10):
"""
We define a decorator that does randomized exponential back-off up to a
certain number of retries. Raises BackoffError if the operation doesn't
succeed after backing off for the specified number of retries (which may be
float("inf")).
Unfortunately doesn't really work on generators.
"""
# Make a new version of the function
@functools.wraps(original_function)
def new_function(*args, **kwargs):
# Call backoff times, overriding parameters with stuff from kwargs
for delay in backoff_times(retries=kwargs.get("retries", retries),
base_delay=kwargs.get("base_delay", base_delay)):
# Keep looping until it works or our iterator raises a
# BackoffError
if delay > 0:
# We have to wait before trying again
RealtimeLogger.error("Retry after {} seconds".format(
delay))
time.sleep(delay)
try:
return original_function(*args, **kwargs)
except:
# Report the formatted underlying exception with traceback
RealtimeLogger.error("{} failed due to: {}".format(
original_function.__name__,
"".join(traceback.format_exception(*sys.exc_info()))))
# If we get here, the function we're calling never ran through before we
# ran out of backoff times. Give an error.
raise BackoffError("Ran out of retries calling {}".format(
original_function.__name__))
return new_function
class S3IOStore(IOStore):
"""
A class that lets you get input from and send output to AWS S3 Storage.
"""
def __init__(self, region, bucket_name, name_prefix=""):
"""
Make a new S3IOStore that reads from and writes to the given
container in the given account, adding the given prefix to keys. All
paths will be interpreted as keys or key prefixes.
"""
# Make sure azure libraries actually loaded
assert(have_s3)
self.region = region
self.bucket_name = bucket_name
self.name_prefix = name_prefix
self.s3 = None
def __connect(self):
"""
Make sure we have an S3 Bucket connection, and set one up if we don't.
Creates the S3 bucket if it doesn't exist.
"""
if self.s3 is None:
RealtimeLogger.debug("Connecting to bucket {} in region".format(
self.bucket_name, self.region))
# Configure boto3 for caching assumed role credentials with the same cache Toil uses
botocore_session = botocore.session.get_session()
botocore_session.get_component('credential_provider').get_provider('assume-role').cache = botocore.credentials.JSONFileCache()
boto3_session = boto3.Session(botocore_session=botocore_session)
# Connect to the s3 bucket service where we keep everything
self.s3 = boto3_session.client('s3')
try:
self.s3.head_bucket(Bucket=self.bucket_name)
except:
self.s3.create_bucket(Bucket=self.bucket_name,
CreateBucketConfiguration={'LocationConstraint':self.region})
def read_input_file(self, input_path, local_path):
"""
Get input from S3.
"""
self.__connect()
RealtimeLogger.debug("Loading {} from S3IOStore".format(
input_path))
# Download the file contents.
self.s3.download_file(self.bucket_name, os.path.join(self.name_prefix, input_path), local_path)
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Yields each of the subdirectories and files in the given input path.
If recursive is false, yields files and directories in the given
directory. If recursive is true, yields all files contained within the
current directory, recursively, but does not yield folders.
If with_times is True, yields (name, modification time) pairs instead of
just names, with modification times represented as datetime objects in
the GMT timezone. Modification times may be None on objects that do not
support them.
Gives relative file/directory names.
"""
raise NotImplementedError()
def write_output_file(self, local_path, output_path):
"""
Write output to S3.
"""
self.__connect()
RealtimeLogger.debug("Saving {} to S3IOStore".format(
output_path))
# Download the file contents.
self.s3.upload_file(local_path, self.bucket_name, os.path.join(self.name_prefix, output_path))
def exists(self, path):
"""
Returns true if the given input or output file exists in the store
already.
"""
raise NotImplementedError()
def get_mtime(self, path):
"""
Returns the modification time of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
def get_size(self, path):
"""
Returns the size in bytes of the given file if it exists, or None
otherwise.
"""
raise NotImplementedError()
class AzureIOStore(IOStore):
"""
A class that lets you get input from and send output to Azure Storage.
"""
def __init__(self, account_name, container_name, name_prefix=""):
"""
Make a new AzureIOStore that reads from and writes to the given
container in the given account, adding the given prefix to keys. All
paths will be interpreted as keys or key prefixes.
If the name prefix does not end with a trailing slash, and is not empty,
one will be added automatically.
Account keys are retrieved from the AZURE_ACCOUNT_KEY environment
variable or from the ~/.toilAzureCredentials file, as in Toil itself.
"""
# Make sure azure libraries actually loaded
assert(have_azure)
self.account_name = account_name
self.container_name = container_name
self.name_prefix = name_prefix
if self.name_prefix != "" and not self.name_prefix.endswith("/"):
# Make sure it has the trailing slash required.
self.name_prefix += "/"
# Sneak into Toil and use the same keys it uses
self.account_key = toil.jobStores.azureJobStore._fetchAzureAccountKey(
self.account_name)
# This will hold out Azure blob store connection
self.connection = None
def __getstate__(self):
"""
Return the state to use for pickling. We don't want to try and pickle
an open Azure connection.
"""
return (self.account_name, self.account_key, self.container_name,
self.name_prefix)
def __setstate__(self, state):
"""
Set up after unpickling.
"""
self.account_name = state[0]
self.account_key = state[1]
self.container_name = state[2]
self.name_prefix = state[3]
self.connection = None
def __connect(self):
"""
Make sure we have an Azure connection, and set one up if we don't.
"""
if self.connection is None:
RealtimeLogger.debug("Connecting to account {}, using "
"container {} and prefix {}".format(self.account_name,
self.container_name, self.name_prefix))
# Connect to the blob service where we keep everything
self.connection = BlobService(
account_name=self.account_name, account_key=self.account_key)
@backoff
def read_input_file(self, input_path, local_path):
"""
Get input from Azure.
"""
self.__connect()
RealtimeLogger.debug("Loading {} from AzureIOStore".format(
input_path))
# Download the blob. This is known to be synchronous, although it can
# call a callback during the process.
self.connection.get_blob_to_path(self.container_name,
self.name_prefix + input_path, local_path)
def list_input_directory(self, input_path, recursive=False,
with_times=False):
"""
Loop over fake /-delimited directories on Azure. The prefix may or may
not not have a trailing slash; if not, one will be added automatically.
Returns the names of files and fake directories in the given input fake
directory, non-recursively.
If with_times is specified, will yield (name, time) pairs including
modification times as datetime objects. Times on directories are None.
"""
self.__connect()
RealtimeLogger.info("Enumerating {} from AzureIOStore".format(
input_path))
# Work out what the directory name to list is
fake_directory = self.name_prefix + input_path
if fake_directory != "" and not fake_directory.endswith("/"):
# We have a nonempty prefix, and we need to end it with a slash
fake_directory += "/"
# This will hold the marker that we need to send back to get the next
# page, if there is one. See <http://stackoverflow.com/a/24303682>
marker = None
# This holds the subdirectories we found; we yield each exactly once if
# we aren't recursing.
subdirectories = set()
while True:
# Get the results from Azure. We don't use delimiter since Azure
# doesn't seem to provide the placeholder entries it's supposed to.
result = self.connection.list_blobs(self.container_name,
prefix=fake_directory, marker=marker)
RealtimeLogger.info("Found {} files".format(len(result)))
for blob in result:
# Yield each result's blob name, but directory names only once
# Drop the common prefix
relative_path = blob.name[len(fake_directory):]
if (not recursive) and "/" in relative_path:
# We found a file in a subdirectory, and we aren't supposed
# to be recursing.
subdirectory, _ = relative_path.split("/", 1)
if subdirectory not in subdirectories:
# It's a new subdirectory. Yield and remember it
subdirectories.add(subdirectory)
if with_times:
yield subdirectory, None
else:
yield subdirectory
else:
# We found an actual file
if with_times:
mtime = blob.properties.last_modified
if isinstance(mtime, datetime.datetime):
# Make sure we're getting proper localized datetimes
# from the new Azure Storage API.
assert(mtime.tzinfo is not None and
mtime.tzinfo.utcoffset(mtime) is not None)
else:
# Convert mtime from a string as in the old API.
mtime = dateutil.parser.parse(mtime).replace(
tzinfo=dateutil.tz.tzutc())
yield relative_path, mtime
else:
yield relative_path
# Save the marker
marker = result.next_marker
if not marker:
break
@backoff
def write_output_file(self, local_path, output_path):
"""
Write output to Azure. Will create the container if necessary.
"""
self.__connect()
RealtimeLogger.debug("Saving {} to AzureIOStore".format(
output_path))
try:
# Make the container
self.connection.create_container(self.container_name)
except azure.WindowsAzureConflictError:
# The container probably already exists
pass
# Upload the blob (synchronously)
# TODO: catch no container error here, make the container, and retry
self.connection.put_block_blob_from_path(self.container_name,
self.name_prefix + output_path, local_path)
@backoff
def exists(self, path):
"""
Returns true if the given input or output file exists in Azure already.
"""
self.__connect()
marker = None
while True:
try:
# Make the container
self.connection.create_container(self.container_name)
except azure.WindowsAzureConflictError:
# The container probably already exists
pass
# Get the results from Azure.
result = self.connection.list_blobs(self.container_name,
prefix=self.name_prefix + path, marker=marker)
for blob in result:
# Look at each blob
if blob.name == self.name_prefix + path:
# Found it
return True
# Save the marker
marker = result.next_marker
if not marker:
break
return False
@backoff
def get_mtime(self, path):
"""
Returns the modification time of the given blob if it exists, or None
otherwise.
"""
self.__connect()
marker = None
while True:
# Get the results from Azure.
result = self.connection.list_blobs(self.container_name,
prefix=self.name_prefix + path, marker=marker)
for blob in result:
# Look at each blob
if blob.name == self.name_prefix + path:
# Found it
mtime = blob.properties.last_modified
if isinstance(mtime, datetime.datetime):
# Make sure we're getting proper localized datetimes
# from the new Azure Storage API.
assert(mtime.tzinfo is not None and
mtime.tzinfo.utcoffset(mtime) is not None)
else:
# Convert mtime from a string as in the old API.
mtime = dateutil.parser.parse(mtime).replace(
tzinfo=dateutil.tz.tzutc())
return mtime
# Save the marker
marker = result.next_marker
if not marker:
break
return None
@backoff
def get_size(self, path):
"""
Returns the size in bytes of the given blob if it exists, or None
otherwise.
"""
self.__connect()
marker = None
while True:
# Get the results from Azure.
result = self.connection.list_blobs(self.container_name,
prefix=self.name_prefix + path, marker=marker)
for blob in result:
# Look at each blob
if blob.name == self.name_prefix + path:
# Found it
size = blob.properties.content_length
return size
# Save the marker
marker = result.next_marker
if not marker:
break
return None
| vgteam/toil-vg | src/toil_vg/iostore.py | Python | apache-2.0 | 36,706 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine.resources.huawei.elb import elb_res_base
from conveyor.i18n import _
class Listener(elb_res_base.ElbBaseResource):
"""A resource for ELB Listener.
Listener resource for Elastic Load Balance Service.
"""
PROPERTIES = (
NAME, DESCRIPTION, LB_ID, PROTOCOL, PORT,
BACKEND_PROTOCOL, BACKEND_PORT, LB_ALGORITHM, SESSION_STICKY,
STICKY_SESSION_TYPE, COOKIE_TIMEOUT, CERTIFICATE,
TCP_TIMEOUT,
) = (
'name', 'description', 'loadbalancer_id', 'protocol', 'port',
'backend_protocol', 'backend_port', 'lb_algorithm', 'session_sticky',
'sticky_session_type', 'cookie_timeout', 'certificate_id',
'tcp_timeout',
)
_BACKEND_PROTOCOLS = (
HTTP, TCP,
) = (
'HTTP', 'TCP',
)
HTTPS = ('HTTPS')
_PROTOCOLS = _BACKEND_PROTOCOLS + (HTTPS,)
_LB_ALGORITHMS = (
ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP,
) = (
'roundrobin', 'leastconn', 'source',
)
ATTRIBUTES = (
MEMBER_NUMBER_ATTR, STATUS_ATTR,
) = (
'member_number', 'status',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('The name of the listener.'),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedPattern('^[0-9a-zA-Z-_]{1,64}$')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('The description of the listener.'),
update_allowed=True,
constraints=[constraints.AllowedPattern('^[^<>]{1,128}$')]
),
LB_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of load balancer associated.'),
required=True,
constraints=[
constraints.CustomConstraint('elb.lb')
]
),
PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('The protocol of the listener.'),
constraints=[
constraints.AllowedValues(_PROTOCOLS)
],
required=True
),
BACKEND_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('The backend protocol of the listener.'),
constraints=[
constraints.AllowedValues(_BACKEND_PROTOCOLS)
],
required=True
),
PORT: properties.Schema(
properties.Schema.INTEGER,
_('The port of the listener.'),
constraints=[
constraints.Range(min=1, max=65535)
],
required=True,
update_allowed=True,
),
BACKEND_PORT: properties.Schema(
properties.Schema.INTEGER,
_('The backend port of the listener.'),
constraints=[
constraints.Range(min=1, max=65535)
],
required=True,
update_allowed=True,
),
LB_ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('The algorithm used to distribute load.'),
constraints=[
constraints.AllowedValues(_LB_ALGORITHMS)
],
required=True,
update_allowed=True,
),
SESSION_STICKY: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether to keep the session.'),
update_allowed=True
),
STICKY_SESSION_TYPE: properties.Schema(
properties.Schema.STRING,
_('The way of handing cookie.'),
constraints=[
constraints.AllowedValues(['insert'])
],
),
COOKIE_TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('The timeout of cookie in minute.'),
constraints=[
constraints.Range(min=1, max=1440)
],
update_allowed=True
),
CERTIFICATE: properties.Schema(
properties.Schema.STRING,
_('The ID of certificate.'),
constraints=[
constraints.CustomConstraint('elb.cert')
]
),
TCP_TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
_('The timeout of TCP session in minute.'),
constraints=[
constraints.Range(min=1, max=5)
],
update_allowed=True
),
}
attributes_schema = {
MEMBER_NUMBER_ATTR: attributes.Schema(
_('The number of the members listened by this listener.'),
),
STATUS_ATTR: attributes.Schema(
_('The status of the listener.'),
),
}
def validate(self):
super(Listener, self).validate()
protocol = self.properties[self.PROTOCOL]
session_sticky = self.properties[self.SESSION_STICKY]
sticky_type = self.properties[self.STICKY_SESSION_TYPE]
certificate = self.properties[self.CERTIFICATE]
tcp_timeout = self.properties[self.TCP_TIMEOUT]
if protocol == self.HTTP and session_sticky:
if sticky_type != 'insert':
msg = (_('Property %(sticky_type)s should be "insert" '
'when %(protocol)s is %(http)s and '
'%(session_sticky)s is enabled.') %
{'sticky_type': self.STICKY_SESSION_TYPE,
'protocol': self.PROTOCOL,
'http': self.HTTP,
'session_sticky': self.SESSION_STICKY})
raise exception.StackValidationFailed(message=msg)
if protocol == self.HTTPS:
if not certificate:
msg = (_('Property %(cert)s is required when %(protocol)s '
'is %(https)s') %
{'cert': self.CERTIFICATE,
'protocol': self.PROTOCOL,
'https': self.HTTPS})
raise exception.StackValidationFailed(message=msg)
if tcp_timeout and protocol != self.TCP:
msg = (_('Property %(tcpt)s is valid when %(protocol)s '
'is %(tcp)s') %
{'tcpt': self.TCP_TIMEOUT,
'protocol': self.PROTOCOL,
'tcp': self.TCP})
raise exception.StackValidationFailed(message=msg)
def _resolve_attribute(self, name):
if not self.resource_id:
return
ls = self.client().listener.get(self.resource_id)
if name == self.MEMBER_NUMBER_ATTR:
return ls.extra['member_number']
if name == self.STATUS_ATTR:
return ls.status
def FnGetRefId(self):
return self.resource_id
def handle_create(self):
props = self._prepare_properties(self.properties)
ls = self.client().listener.create(**props)
self.resource_id_set(ls.id)
return ls.status
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
if self.COOKIE_TIMEOUT in prop_diff:
if prop_diff[self.COOKIE_TIMEOUT] is None:
prop_diff.pop(self.COOKIE_TIMEOUT)
if self.TCP_TIMEOUT in prop_diff:
if prop_diff[self.TCP_TIMEOUT] is None:
prop_diff.pop(self.TCP_TIMEOUT)
if self.SESSION_STICKY in prop_diff:
if prop_diff[self.SESSION_STICKY] is None:
prop_diff.pop(self.SESSION_STICKY)
self.client().listener.update(listener_id=self.resource_id,
**prop_diff)
def handle_delete(self):
if not self.resource_id:
return
try:
self.client().listener.delete(self.resource_id)
except Exception as e:
# here we don't use ignore_not_found, because elb raises:
# BadRequest("Bad Request {'message': 'find listener failed',
# 'code': 'ELB.6030'}",)
if 'ELB.6030' in e.message:
return
raise
def check_create_complete(self, ls_status):
return self._check_active(ls_status)
def needs_replace_failed(self):
if not self.resource_id:
return True
with self.client_plugin().ignore_not_found:
ls = self.client().listener.get(self.resource_id)
return ls.status == 'ERROR'
return True
def resource_mapping():
return {
'OSE::ELB::Listener': Listener,
}
| Hybrid-Cloud/conveyor | conveyor/conveyorheat/engine/resources/huawei/elb/listener.py | Python | apache-2.0 | 9,416 |
import logging
from constance import config
from dateutil.relativedelta import relativedelta
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.utils import timezone
from django.utils.translation import ugettext as _
from seaserv import seafile_api
from pysearpc import SearpcError
from seahub.api2.utils import api_error
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.share.models import FileShare, OrgFileShare
from seahub.utils import gen_shared_link, is_org_context
from seahub.views import check_folder_permission
logger = logging.getLogger(__name__)
def get_share_link_info(fileshare):
data = {}
token = fileshare.token
data['repo_id'] = fileshare.repo_id
data['path'] = fileshare.path
data['ctime'] = fileshare.ctime
data['view_cnt'] = fileshare.view_cnt
data['link'] = gen_shared_link(token, fileshare.s_type)
data['token'] = token
data['expire_date'] = fileshare.expire_date
data['is_expired'] = fileshare.is_expired()
data['username'] = fileshare.username
return data
class ShareLinks(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
def _can_generate_shared_link(self, request):
return request.user.permissions.can_generate_shared_link()
def _generate_obj_id_and_type_by_path(self, repo_id, path):
file_id = seafile_api.get_file_id_by_path(repo_id, path)
if file_id:
return (file_id, 'f')
dir_id = seafile_api.get_dir_id_by_path(repo_id, path)
if dir_id:
return (dir_id, 'd')
return (None, None)
def get(self, request):
""" get share links.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check if args invalid
repo_id = request.GET.get('repo_id', None)
if repo_id:
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# repo level permission check
if not check_folder_permission(request, repo_id, '/'):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
path = request.GET.get('path', None)
if path:
try:
obj_id, s_type = self._generate_obj_id_and_type_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not obj_id:
if s_type == 'f':
error_msg = 'file %s not found.' % path
elif s_type == 'd':
error_msg = 'folder %s not found.' % path
else:
error_msg = 'path %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# folder/path permission check
if not check_folder_permission(request, repo_id, path):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
username = request.user.username
fileshares = FileShare.objects.filter(username=username)
# filter result by args
if repo_id:
fileshares = filter(lambda fs: fs.repo_id == repo_id, fileshares)
if path:
if s_type == 'd' and path[-1] != '/':
path = path + '/'
fileshares = filter(lambda fs: fs.path == path, fileshares)
result = []
for fs in fileshares:
link_info = get_share_link_info(fs)
result.append(link_info)
if len(result) == 1:
result = result[0]
return Response(result)
def post(self, request):
""" create share link.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
repo_id = request.data.get('repo_id', None)
if not repo_id:
error_msg = 'repo_id invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
repo = seafile_api.get_repo(repo_id)
if not repo:
error_msg = 'Library %s not found.' % repo_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
path = request.data.get('path', None)
if not path:
error_msg = 'path invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
obj_id, s_type = self._generate_obj_id_and_type_by_path(repo_id, path)
except SearpcError as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if not obj_id:
if s_type == 'f':
error_msg = 'file %s not found.' % path
elif s_type == 'd':
error_msg = 'folder %s not found.' % path
else:
error_msg = 'path %s not found.' % path
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
if not check_folder_permission(request, repo_id, path):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
password = request.data.get('password', None)
if password and len(password) < config.SHARE_LINK_PASSWORD_MIN_LENGTH:
error_msg = _('Password is too short.')
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
expire_days = int(request.data.get('expire_days', 0))
except ValueError:
expire_days = 0
if expire_days <= 0:
expire_date = None
else:
expire_date = timezone.now() + relativedelta(days=expire_days)
username = request.user.username
if s_type == 'f':
fs = FileShare.objects.get_file_link_by_path(username, repo_id, path)
if not fs:
fs = FileShare.objects.create_file_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
elif s_type == 'd':
fs = FileShare.objects.get_dir_link_by_path(username, repo_id, path)
if not fs:
fs = FileShare.objects.create_dir_link(username, repo_id, path,
password, expire_date)
if is_org_context(request):
org_id = request.user.org.org_id
OrgFileShare.objects.set_org_file_share(org_id, fs)
link_info = get_share_link_info(fs)
return Response(link_info)
class ShareLink(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle, )
def _can_generate_shared_link(self, request):
return request.user.permissions.can_generate_shared_link()
def get(self, request, token):
try:
fs = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
error_msg = 'token %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
link_info = get_share_link_info(fs)
return Response(link_info)
def delete(self, request, token):
""" delete share link.
"""
if not self._can_generate_shared_link(request):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
fs = FileShare.objects.get(token=token)
except FileShare.DoesNotExist:
error_msg = 'token %s not found.' % token
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
username = request.user.username
if not fs.is_owner(username):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
fs.delete()
return Response({'success': True})
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
| saukrIppl/seahub | seahub/api2/endpoints/share_links.py | Python | apache-2.0 | 9,155 |
# Generated by Django 2.1 on 2018-08-13 08:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ibms', '0006_auto_20180813_1603'),
]
operations = [
migrations.RenameField(
model_name='serviceprioritymappings',
old_name='costcentreName',
new_name='costCentreName',
),
]
| parksandwildlife/ibms | ibms_project/ibms/migrations/0007_auto_20180813_1604.py | Python | apache-2.0 | 391 |
"""
Python wrapper for functionality exposed in the TemcaGraph dll.
@author: jayb
"""
from ctypes import *
import logging
import threading
import time
import os
import sys
import numpy as np
from pytemca.image.imageproc import fit_sin
from numpy.ctypeslib import ndpointer
if sys.flags.debug:
rel = "../x64/Debug/TemcaGraphDLL.dll"
else:
rel = "../x64/Release/TemcaGraphDLL.dll"
dll_path = os.path.join(os.path.dirname(__file__), rel)
class StatusCallbackInfo(Structure):
_fields_ = [
("status", c_int),
# -1 : fatal error
# 0: finishied init (startup),
# 1: starting new frame,
# 2: finished frame capture (ie. time to move the stage),
# 3: Sync step completed
# 4: Async step completed
# 5: Processing finished (except Async graphs)
# 6: Shutdown finished
("info_code", c_int),
# value indicates which sync or async step completed
("error_string", c_char * 256)
]
STATUSCALLBACKFUNC = CFUNCTYPE(c_int, POINTER(StatusCallbackInfo)) # returns c_int
class CameraInfo(Structure):
'''
Information about the current camera in use.
'''
_fields_ = [
("width", c_int),
("height", c_int),
("format", c_int),
("pixel_depth", c_int),
("camera_bpp", c_int),
("camera_model", c_char * 256),
("camera_id", c_char * 256)
]
class FocusInfo(Structure):
'''
Information about focus quality.
'''
_fields_ = [
("focus_score", c_float),
("astig_score", c_float),
("astig_angle", c_float),
("astig_profile", c_float * 360)
]
class QCInfo(Structure):
'''
Information about image quality.
'''
_fields_ = [
("min_value", c_int),
("max_value", c_int),
("mean_value", c_int),
("histogram", c_int * 256),
]
class ROIInfo(Structure):
'''
Information about the selected ROI used for stitching.
'''
_fields_ = [
("gridX", c_int),
("gridY", c_int),
]
class MatcherInfo(Structure):
'''
Match parameters from the Matcher.
'''
_fields_ = [
("dX", c_float),
("dY", c_float),
("distance", c_float),
("rotation", c_float),
("good_matches", c_int),
]
class TemcaGraphDLL(object):
"""
Hooks onto the C++ DLL. These are all the foreign functions we are going to be using
from the dll, along with their arguments types and return values.
"""
_TemcaGraphDLL = WinDLL(dll_path)
open = _TemcaGraphDLL.temca_open
open.argtypes = [c_int, c_char_p, STATUSCALLBACKFUNC]
open.restype = c_uint32
close = _TemcaGraphDLL.temca_close
close.argtype = [None]
close.restype = c_uint32
set_mode = _TemcaGraphDLL.setMode
set_mode.argtypes = [c_char_p]
set_mode.restype = c_uint32
get_camera_info = _TemcaGraphDLL.getCameraInfo
get_camera_info.restype = CameraInfo
get_focus_info = _TemcaGraphDLL.getFocusInfo
get_focus_info.restype = FocusInfo
set_fft_size = _TemcaGraphDLL.setFFTSize
set_fft_size.argtypes = [c_int, c_int, c_int]
set_fft_size.restype = None
get_qc_info = _TemcaGraphDLL.getQCInfo
get_qc_info.restype = QCInfo
grab_frame = _TemcaGraphDLL.grabFrame
grab_frame.argtypes = [c_char_p, c_int, c_int]
grab_frame.restype = None
get_last_frame = _TemcaGraphDLL.getLastFrame
get_last_frame.argtypes = [ndpointer(c_uint16, flags="C_CONTIGUOUS")]
get_last_frame.restype = None
get_preview_frame = _TemcaGraphDLL.getPreviewFrame
get_preview_frame.argtypes = [ndpointer(c_uint8, flags="C_CONTIGUOUS")]
get_preview_frame.restype = None
set_parameter = _TemcaGraphDLL.setParameter
set_parameter.argtypes = [c_char_p, c_int]
set_parameter.restype = None
get_parameter = _TemcaGraphDLL.getParameter
get_parameter.argtypes = [c_char_p]
get_parameter.restype = c_uint32
get_status = _TemcaGraphDLL.getStatus
get_status.restype = StatusCallbackInfo
setRoiInfo = _TemcaGraphDLL.setROI
setRoiInfo.restype = None
setRoiInfo.argtypes = [ POINTER( ROIInfo) ]
grab_matcher_template = _TemcaGraphDLL.grabMatcherTemplate
grab_matcher_template.restype = None
grab_matcher_template.argtypes = [c_int, c_int, c_int, c_int]
get_matcher_info = _TemcaGraphDLL.getMatcherInfo
get_matcher_info.restype = MatcherInfo
get_matcher_info.argtypes = None
class TemcaGraph(object):
'''
Python class which wraps the C++ TemcaGraphDLL and provides the linkage between Python and the C++ OpenCVGraph world.
The Python events which are triggered by C++ callbacks are::
eventInitCompleted - all graphs have finished building
eventStartNewFrame - ready for client to issue a frame grab request
eventCaptureCompleted - exposure completed
eventCapturePostProcessingCompleted - xfer to CUDA, upshift, Bright/Dark correction finished
eventSyncProcessingCompleted - Synchronous processing has finished
eventAsyncProcessingCompleted - Asynchronous processing has finished (may overlap next exposure)
eventFiniCompleted - graph has finished shutting down
'''
def __init__(self,):
'''
Many additional class variables are defined in the open() function
'''
self.aborting = False
self.eventInitCompleted = threading.Event() # Event signalling that initialization is complete.
self.eventStartNewFrame = threading.Event()
self.eventCaptureCompleted = threading.Event()
self.eventCapturePostProcessingCompleted = threading.Event()
self.eventSyncProcessingCompleted = threading.Event()
self.eventAsyncProcessingCompleted = threading.Event()
self.eventFiniCompleted = threading.Event()
# all events after eventStartNewFrame, and before eventFiniCompleted
self.eventsAllCaptureLoop = [self.eventCaptureCompleted,
self.eventCapturePostProcessingCompleted,
self.eventSyncProcessingCompleted,
self.eventAsyncProcessingCompleted]
self.threadLock = threading.Lock()
self.preview_decimation_factor = 4
self.wait_time = 10 # in seconds. If we reach this limit, its an error
def wait_graph_event (self, event):
'''
Waits for the specified event to signal indicating a change in the graph state,
and then clears the event.
'''
self.threadLock.acquire()
event.wait(self.wait_time)
event.clear()
self.threadLock.release()
def wait_all_capture_events(self):
for e in self.eventsAllCaptureLoop:
self.wait_graph_event(e)
def wait_start_of_frame(self):
'''
Wait for the event which indicates the graph is ready to start a new frame.
'''
self.wait_graph_event(self.eventStartNewFrame)
def open(self, dummyCamera = False, dummyPath = None, callback=None):
'''
Open up the Temca C++ DLL.
If dummyCamera is True, create a dummy TEMCA image source using...
either a real camera, image, directory, or movie according to dummyPath which MUST be specified
as no default path is provided. If dummyPath is an integer string, then an OpenCV camera will be used
corresponding to that index.
'''
if callback == None:
callback = self.statusCallback
# prevent the callback from being garbage collected !!!
self.callback = STATUSCALLBACKFUNC(callback)
self.dummyPath = dummyPath
t = time.clock()
if not TemcaGraphDLL.open(dummyCamera, self.dummyPath, self.callback):
raise EnvironmentError('Cannot open TemcaGraphDLL. Possiblities: camera, is offline, not installed, or already in use')
logging.info("TemcaGraph DLL initialized in %s seconds" % (time.clock() - t))
self.eventInitCompleted.wait()
# get info about frame dimensions
fi = self.get_camera_info()
self.image_width = fi['width']
self.image_height = fi['height']
self.pixel_depth = fi['pixel_depth'] # 16 ALWAYS
self.camera_bpp = fi['camera_bpp'] # 12 for Ximea (upshift to full 16 bpp)
self.camera_model = fi['camera_model']
self.camera_id = fi['camera_id']
# if this is changed dynamically, reallocate preview frames
self.set_parameter('preview_decimation_factor', self.preview_decimation_factor)
def close(self):
'''
Close down all graphs.
'''
TemcaGraphDLL.close()
def set_mode(self, graphType):
'''
Sets the overall mode of operation for the Temca graph.
Each mode activates a subset of the overall graph.::
graphType SYNC ASYNC
-----------------------------------------------------
temca : ximea, postCap, QC Stitch
Focus
FileWriter
raw : ximea, postCap, FileWriter
preview : ximea, postCap, QC
Focus
'''
return TemcaGraphDLL.set_mode(graphType)
def set_parameter(self, parameter, value):
'''
General purpose way to set random parameters on the graph.
'value' must be an int. Valid parameters are::
'exposure' for Ximea, this is in microseconds
'gain' for Ximea, this is in dB * 1000
'preview_decimation_factor' (2, 4, 8, ...)
'''
TemcaGraphDLL.set_parameter(parameter, value)
def get_parameter(self, parameter):
'''
General purpose way to get random parameters on the graph.
Return value is an int. Valid parameters are given under set_parameter.
'''
return TemcaGraphDLL.get_parameter(parameter)
def get_camera_info(self):
'''
Returns a dictionary with details of the capture format including width, height, bytes per pixel, and the camera model and serial number.
'''
info = TemcaGraphDLL.get_camera_info()
return {'width' : info.width, 'height' : info.height,
'pixel_depth' : info.pixel_depth, 'camera_bpp' : info.camera_bpp,
'camera_model' : info.camera_model, 'camera_id' : info.camera_id}
def get_focus_info(self):
''' returns focus and astigmatism values, some calculated in CUDA, some in python '''
info = TemcaGraphDLL.get_focus_info()
astig_amp, astig_angle, offset, wave = fit_sin(info.astig_profile)
astig_score = astig_amp/np.ptp(info.astig_profile)
array_type = c_float*len(info.astig_profile)
astig_profile_pointer = cast(info.astig_profile, POINTER(array_type))
astig_numpy = np.frombuffer(astig_profile_pointer.contents, dtype=np.float32)
# return the profile?
return {'focus_score': info.focus_score, 'astig_score': astig_score, 'astig_angle' : astig_angle,
'astig_profile' : astig_numpy,}
def set_fft_size(self, dimension, start_freq, end_freq):
''' Set the dimension of the FFT (which must be a power of 2) and the start and end frequency for focus/astig measurement.
Both start and end frequencies must be less than dimension.
'''
TemcaGraphDLL.set_fft_size(dimension, start_freq, end_freq);
def get_qc_info(self):
''' Get the min, max, mean, and histogram from the last image acquired. '''
info = TemcaGraphDLL.get_qc_info()
array_type = c_int*len(info.histogram)
hist_profile_pointer = cast(info.histogram, POINTER(array_type))
hist_numpy = np.frombuffer(hist_profile_pointer.contents, dtype=np.int32)
return {'min':info.min_value, 'max': info.max_value, 'mean':info.mean_value, 'histogram':hist_numpy}
def grab_matcher_template(self, x, y, width, height):
''' Set the ROI to use as the template on the next image acquired. '''
TemcaGraphDLL.grab_matcher_template(x, y, width, height)
def get_matcher_info(self):
''' Return Match status from the matcher. If "good_matches" is 0, then the match operation failed'''
info = TemcaGraphDLL.get_matcher_info()
return {'dX': info.dX, 'dY': info.dY, 'distance': info.distance, 'rotation': info.rotation, 'good_matches': info.good_matches}
def get_status(self):
return TemcaGraphDLL.get_status()
def grab_frame(self, filename = "none", roiX = 0, roiY = 0):
'''
Trigger capture of a frame. This function does not wait for completion of anything.
'''
TemcaGraphDLL.grab_frame(filename, roiX, roiY)
def grab_frame_wait_completion(self, filename = "none", roiX = 0, roiY = 0):
'''
Trigger capture of a frame. This function waits for completion of all graphs.
'''
self.wait_start_of_frame()
self.grab_frame(filename, roiX, roiY) # filename doesn't matter in preview, nor does roi
self.wait_all_capture_events()
def allocate_frame(self):
'''
Allocate memory as a numpy array to hold a complete frame (16bpp grayscale).
'''
return np.zeros(shape=(self.image_width,self.image_height), dtype= np.uint16)
def allocate_preview_frame(self):
'''
Allocate memory as a numpy array to hold a preview frame (8bpp grayscale).
'''
return np.zeros(shape=(self.image_width/self.preview_decimation_factor,self.image_height/self.preview_decimation_factor), dtype= np.uint8)
def get_last_frame(self, img):
'''
Get a copy of the last frame captured as an ndarray (16bpp grayscale).
This must be called only after eventCapturePostProcessingCompleted has signaled and before the next frame is acquired.
'''
assert (img.shape == (self.image_width, self.image_height) and (img.dtype.type == np.uint16))
TemcaGraphDLL.get_last_frame(img)
def get_preview_frame(self, img):
'''
Get a copy of the preview image as an ndarray (8bpp grayscale).
This must be called only after eventCapturePostProcessingCompleted has signaled and before the next frame is acquired.
'''
assert (img.shape == (self.image_width/self.preview_decimation_factor, self.image_height/self.preview_decimation_factor) and (img.dtype.type == np.uint8))
TemcaGraphDLL.get_preview_frame(img)
def optimize_exposure(self):
'''
Search for optimal exposure value using binary search.
'''
min_high_value = 61000
max_high_value = 63000
exposure_step = 100000 #uS
self.set_mode('preview')
exp = self.get_parameter('exposure')
def _searchDirection():
''' return 0 = just right, 1 go up, -1 go down '''
self.grab_frame_wait_completion()
info = self.get_qc_info()
m = info['max']
if m > min_high_value and m < max_high_value:
return 0 # just right
elif m >= max_high_value:
return +1 # too high
else:
return -1 # too low
#overshoot top end
dir = _searchDirection()
while dir < 0:
exp = exp + exposure_step
self.set_parameter('exposure', exp)
dir = _searchDirection()
if dir == 0:
return;
exp_top = exp
#overshoot bottom end
while dir > 0:
exp = exp - exposure_step
self.set_parameter('exposure', exp)
dir = _searchDirection()
if dir == 0:
return;
exp_bottom = exp
# binary search, starting from bottom
exposure_step = exp_top - exp_bottom
while dir != 0 and exposure_step >= 2:
exposure_step = exposure_step / 2
if dir < 0:
exp += exposure_step
else:
exp -= exposure_step
self.set_parameter('exposure', exp)
dir = _searchDirection()
def set_roi_info (self, roiInfo):
'''
Set the dimensions of the ROI. This information is used for stitching.
'''
TemcaGraphDLL.setRoiInfo (roiInfo)
def statusCallback (self, statusInfo):
'''
Called by the C++ Temca graph runner whenever status changes.
These values correspond to the Python events activated. ::
-1 : fatal error
0: finished init (startup)
1: starting new frame
2: finished frame capture (ie. time to move the stage)
3: capture post processing finished (preview ready)
4: Sync step completed
5: Async step completed
6: Shutdown finished
'''
retValue = True
status = statusInfo.contents.status
info = statusInfo.contents.info_code
#logging.info ('callback status: ' + str(status) + ', info: ' + str(info))
tid = threading.currentThread()
if (status == -1):
self.aborting = True
error_string = statusInfo.contents.error_string
logging.info ('callback error is' + error_string)
retValue = False
elif status == 0:
# finished initialization of all graphs
self.eventInitCompleted.set()
elif status == 1:
# ready to start the next frame (start of the loop)
self.eventStartNewFrame.set()
elif status == 2:
# capture completed
# (move the stage now)
self.eventCaptureCompleted.set()
elif status == 3:
# post processing finished (*16, bright dark, spatial correction, preview ready)
self.eventCapturePostProcessingCompleted.set()
elif status == 4:
# all synchronous processing for the frame is complete
self.eventSyncProcessingCompleted.set()
elif status == 5:
# all asynchronous processing for the frame is complete
self.eventAsyncProcessingCompleted.set()
elif status == 6:
# graph is finished all processing. Close app.
self.eventFiniCompleted.set()
return retValue
if __name__ == '__main__':
import cv2
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Open the DLL which runs all TEMCA graphs
#os.environ["PATH"] += os.pathsep
temcaGraph = TemcaGraph()
temcaGraph.open(dummyCamera = True)
showRawImage = True
showPreviewImage = True
if showRawImage or showPreviewImage:
import numpy as np
if showRawImage:
imgRaw = temcaGraph.allocate_frame()
if showPreviewImage:
imgPreview = temcaGraph.allocate_preview_frame() # 8bpp and decimated
# wait for graph to complete initialization
temcaGraph.eventInitCompleted.wait(temcaGraph.wait_time)
#temcaGraph.optimize_exposure()
temcaGraph.set_mode('preview')
#for j in range(10):
# temcaGraph.grab_frame_wait_completion()
# sys.stdout.write('.')
# info = temcaGraph.get_qc_info()
#for mode in ['temca', 'preview', 'raw']:
#for mode in ['temca']:
for mode in ['preview']:
print
print mode
temcaGraph.set_mode(mode)
frameCounter = 0
# set ROI grid size (for stitching only)
roiInfo = ROIInfo()
roiInfo.gridX = 5
roiInfo.gridY = 5
temcaGraph.set_roi_info (roiInfo)
for y in range(roiInfo.gridY):
for x in range (roiInfo.gridX):
if temcaGraph.aborting:
break
temcaGraph.wait_start_of_frame()
temcaGraph.grab_frame('j:/junk/pyframe' + str(frameCounter) + '.tif', x, y) # filename doesn't matter in preview
sys.stdout.write('.')
temcaGraph.wait_graph_event(temcaGraph.eventCaptureCompleted)
# move the stage here
# wait for Async ready event (stitching complete for previous frame)
if frameCounter > 0:
temcaGraph.wait_graph_event(temcaGraph.eventAsyncProcessingCompleted)
# wait for preview ready event
temcaGraph.wait_graph_event(temcaGraph.eventCapturePostProcessingCompleted)
# get a copy of the frame and display it?
if showRawImage:
temcaGraph.get_last_frame(imgRaw)
cv2.imshow('imgRaw', imgRaw)
cv2.waitKey(1);
# get a copy of the preview and display it?
if showPreviewImage:
temcaGraph.get_preview_frame(imgPreview)
cv2.imshow('imgPreview', imgPreview)
cv2.waitKey(1);
# wait for Sync ready event (QC and Focus complete)
temcaGraph.wait_graph_event(temcaGraph.eventSyncProcessingCompleted)
qcInfo = temcaGraph.get_qc_info()
#histogram = qcInfo['histogram']
focusInfo = temcaGraph.get_focus_info()
#print qcInfo
frameCounter += 1
temcaGraph.close()
temcaGraph.wait_graph_event(temcaGraph.eventFiniCompleted)
| jaybo/OpenCVGraph | TemcaGraphPy/temca_graph.py | Python | apache-2.0 | 21,780 |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
import numpy as np
import mxnet as mx
import random
import math
from mxnet.executor_manager import _split_input_slice
from utils.image import tensor_vstack
from segmentation.segmentation import get_segmentation_train_batch, get_segmentation_test_batch
from PIL import Image
from multiprocessing import Pool
class TestDataLoader(mx.io.DataIter):
def __init__(self, segdb, config, batch_size=1, shuffle=False):
super(TestDataLoader, self).__init__()
# save parameters as properties
self.segdb = segdb
self.batch_size = batch_size
self.shuffle = shuffle
self.config = config
# infer properties from roidb
self.size = len(self.segdb)
self.index = np.arange(self.size)
# decide data and label names (only for training)
self.data_name = ['data']
self.label_name = None
# status variable for synchronization between get_data and get_label
self.cur = 0
self.data = None
self.label = []
self.im_info = None
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch()
@property
def provide_data(self):
return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
@property
def provide_label(self):
return [None for i in xrange(len(self.data))]
@property
def provide_data_single(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
@property
def provide_label_single(self):
return None
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur < self.size
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]
data, label, im_info = get_segmentation_test_batch(segdb, self.config)
self.data = [[mx.nd.array(data[i][name]) for name in self.data_name] for i in xrange(len(data))]
self.im_info = im_info
class TrainDataLoader(mx.io.DataIter):
def __init__(self, sym, segdb, config, batch_size=1, crop_height = 768, crop_width = 1024, shuffle=False, ctx=None, work_load_list=None):
"""
This Iter will provide seg data to Deeplab network
:param sym: to infer shape
:param segdb: must be preprocessed
:param config: config file
:param batch_size: must divide BATCH_SIZE(128)
:param crop_height: the height of cropped image
:param crop_width: the width of cropped image
:param shuffle: bool
:param ctx: list of contexts
:param work_load_list: list of work load
:return: DataLoader
"""
super(TrainDataLoader, self).__init__()
# save parameters as properties
self.sym = sym
self.segdb = segdb
self.config = config
self.batch_size = batch_size
if self.config.TRAIN.ENABLE_CROP:
self.crop_height = crop_height
self.crop_width = crop_width
else:
self.crop_height = None
self.crop_width = None
self.shuffle = shuffle
self.ctx = ctx
if self.ctx is None:
self.ctx = [mx.cpu()]
self.work_load_list = work_load_list
# infer properties from segdb
self.size = len(segdb)
self.index = np.arange(self.size)
# decide data and label names
self.data_name = ['data']
self.label_name = ['label']
# status variable for synchronization between get_data and get_label
self.cur = 0
self.batch = None
self.data = None
self.label = None
# init multi-process pool
self.pool = Pool(processes = len(self.ctx))
# get first batch to fill in provide_data and provide_label
self.reset()
self.get_batch_parallel()
random.seed()
@property
def provide_data(self):
return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
@property
def provide_label(self):
return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
@property
def provide_data_single(self):
return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
@property
def provide_label_single(self):
return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.index)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def next(self):
if self.iter_next():
self.get_batch_parallel()
self.cur += self.batch_size
return mx.io.DataBatch(data=self.data, label=self.label,
pad=self.getpad(), index=self.getindex(),
provide_data=self.provide_data, provide_label=self.provide_label)
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def infer_shape(self, max_data_shape=None, max_label_shape=None):
""" Return maximum data and label shape for single gpu """
if max_data_shape is None:
max_data_shape = []
if max_label_shape is None:
max_label_shape = []
max_shapes = dict(max_data_shape + max_label_shape)
_, label_shape, _ = self.sym.infer_shape(**max_shapes)
label_shape = [(self.label_name[0], label_shape)]
return max_data_shape, label_shape
def get_batch_parallel(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
segdb = [self.segdb[self.index[i]] for i in range(cur_from, cur_to)]
# decide multi device slice
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
"Invalid settings for work load. "
slices = _split_input_slice(self.batch_size, work_load_list)
multiprocess_results = []
for idx, islice in enumerate(slices):
isegdb = [segdb[i] for i in range(islice.start, islice.stop)]
multiprocess_results.append(self.pool.apply_async(parfetch, (self.config, self.crop_width, self.crop_height, isegdb)))
rst = [multiprocess_result.get() for multiprocess_result in multiprocess_results]
all_data = [_['data'] for _ in rst]
all_label = [_['label'] for _ in rst]
self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]
def parfetch(config, crop_width, crop_height, isegdb):
# get testing data for multigpu
data, label = get_segmentation_train_batch(isegdb, config)
if config.TRAIN.ENABLE_CROP:
data_internal = data['data']
label_internal = label['label']
sx = math.floor(random.random() * (data_internal.shape[3] - crop_width + 1))
sy = math.floor(random.random() * (data_internal.shape[2] - crop_height + 1))
sx = (int)(sx)
sy = (int)(sy)
assert(sx >= 0 and sx < data_internal.shape[3] - crop_width + 1)
assert(sy >= 0 and sy < data_internal.shape[2] - crop_height + 1)
ex = (int)(sx + crop_width - 1)
ey = (int)(sy + crop_height - 1)
data_internal = data_internal[:, :, sy : ey + 1, sx : ex + 1]
label_internal = label_internal[:, :, sy : ey + 1, sx : ex + 1]
data['data'] = data_internal
label['label'] = label_internal
assert (data['data'].shape[2] == crop_height) and (data['data'].shape[3] == crop_width)
assert (label['label'].shape[2] == crop_height) and (label['label'].shape[3] == crop_width)
return {'data': data, 'label': label}
| deepinsight/Deformable-ConvNets | deeplab/core/loader.py | Python | apache-2.0 | 9,374 |
# coding=utf-8
import json
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
import django.views
from django.template import defaultfilters as template_filters
from horizon import tables
from horizon import exceptions
from cloudkittydashboard.api import cloudkitty as api
from openstack_dashboard.api import keystone
from cloudkittydashboard.dashboards.project.billing_overview import tables as project_tables
import time
from datetime import date, timedelta, datetime
import calendar
from django.http import JsonResponse,HttpResponse
import json
import xlsxwriter
import StringIO
import logging
LOG = logging.getLogger(__name__)
def detail(request, org_id):
if org_id == None:
org_id = get_tenant_id(request)
try:
details = api.cloudkittyclient(request).billings.list_services_cost(get_month(request), org_id)
except Exception:
details = []
exceptions.handle(request, _('Unable to retrieve billing list.'))
return HttpResponse(json.dumps(details),content_type="application/json")
class IndexView(tables.DataTableView):
# A very simple class-based view...
template_name = "project/billing_overview/index.html"
table_class = project_tables.BillingOverviewTable
page_title = _("Billing Overview")
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context["tenant_id"] = get_tenant_id(self.request)
context["selected_month"] = get_month(self.request)
context["organizations"] = get_tenant_list(self.request)
year = time.strftime("%Y",time.localtime())
month = time.strftime("%m",time.localtime())
if int(month) == 1:
last_month = 12
last_year = int(year) - 1
else:
last_month = int(month) - 1
last_year = year
try:
context["year_begin"] = str((int(year)-1)) + "/" + str((int(month)))
context["year_end"] = str(last_year) + "/" + str(last_month)
# get last 12 months total cost
total_year = api.cloudkittyclient(self.request).billings.get_consumer_trends("month",
12,
get_tenant_id(self.request))
year_sum = 0
for billing_month in total_year["consumerTrends"]:
year_sum += billing_month["cost"]
context["billing_year"] = year_sum
#get current month cost
context["time_current_month"] = year+"/"+month
services_rate_list = api.cloudkittyclient(self.request).billings.list_services_cost(year+"-"+month,
get_tenant_id(self.request))
current_sum = 0
for rate in services_rate_list["servicesRate"]:
current_sum += rate["rate"]
context["billing_current_month"] = current_sum
#get last month cost
context["time_last_month"] = str(last_year)+"/"+str(last_month)
context["billing_last_month"] = api.cloudkittyclient(self.request).billings.get_consumer_trends("month",
1,
get_tenant_id(self.request))["consumerTrends"][0]["cost"]
except Exception:
exceptions.handle(self.request,_("Unable to retrieve month cost"))
today = date.today()
context["last_12_months"] = last_12_months()
return context;
def get_data(self):
try:
billings = api.cloudkittyclient(self.request).billings.get_total_cost(get_month(self.request), get_tenant_id(self.request))["totals"]
except Exception:
billings = []
exceptions.handle(self.request, _('Unable to retrieve billing list.'))
return billings
class ReportView(django.views.generic.TemplateView):
def get(self,request,*args,**kwargs):
tenant_id = get_tenant_id(self.request)
billing_month = get_month(self.request)
tenants = get_tenant_list(self.request)
for tenant in tenants:
if tenant.id == tenant_id:
tenant_name = tenant.name
break
reports = api.cloudkittyclient(self.request).billings.list_month_report(tenant_id,billing_month)
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output)
month_sheet = workbook.add_worksheet(tenant_name)
#设置列宽度
month_sheet.set_column('A:Z',9)
#表头
head = (u'部门',u'资源',
u'1月',u'2月',u'3月', u'1Q合计',
u'4月',u'5月',u'6月', u'2Q合计', u'上半年计',
u'7月',u'8月',u'9月', u'3Q合计',
u'10月',u'11月',u'12月',u'4Q合计',u'下半年计',u'全年合计'
)
# 设置表头字符串和格式
head_format = workbook.add_format({
'bold':True,
'font_size':20,
'font_name':'Microsoft YaHei'
})
row = 1
col = 0
head_str = billing_month.split('-')[0] + u'年度月别计费一览表'
head_str1 = u'资源及使用费用情况'
month_sheet.write(row,col,head_str,head_format)
row += 1
month_sheet.write(row,col,u'如需查看季、年度合计,请在月份对应位置取消隐藏')
row += 2
month_sheet.write(row,col,head_str1,head_format)
explain_format = workbook.add_format({'align':'right'})
year_month = billing_month.split('-')
if billing_month == template_filters.date(date.today(), "Y-m"):
tab_date = u'制表日期:%d月%d日' %(int(year_month[1]),date.today().day-1)
else:
tab_date = u'制表日期:%d月%d日' %(int(year_month[1]),calendar.monthrange(int(year_month[0]),int(year_month[1]))[1])
month_sheet.write(row,len(head)-1,u'单位:元 ' + tab_date, explain_format)
row += 1
col = 0
head2_format = workbook.add_format({
'bold':True,
'align':'center',
'valign':'vcenter',
'bg_color':'#D8E4BC',
'left':1,
'font_name':'Microsoft YaHei'
})
#设置行高
month_sheet.set_row(row,30)
for index_str in head:
month_sheet.write(row,col,index_str,head2_format)
col += 1
row += 1
month_sheet.set_column('A:A',15)
#资源和合计所占行数
names = ['Compute','Volume',u'合计']
even_format = workbook.add_format({
'border':1,
'font_name':'Microsoft YaHei',
'num_format': '#,##0.00'
})
odd_format=workbook.add_format({
'border':1,
'font_name':'Microsoft YaHei',
'bg_color':'#D9D9D9',
'num_format': '#,##0.00'
})
resource_total_rows = 3
# 处理每个部门
merge_format = workbook.add_format({
'bold':True,
'font_name':'Microsoft YaHei',
'font_size':14,
'align':'center',
'valign':'vcenter',
'border':1
})
for depart in reports['departs']:
col = 1
for index,name in enumerate(names):
if index % 2 != 0:
month_sheet.set_row(row+index,None,odd_format)
else:
month_sheet.set_row(row+index,None,even_format)
month_sheet.write(row+index,col,name)
month_sheet.merge_range(row,0,row+resource_total_rows-1,0,depart['tenant_name'],merge_format)
tmp_row = row
write_col = col + 1
for month_report in depart['month_reports']:
for res_tpye in month_report['res_types']:
if res_tpye['res_type'] == "compute":
write_row = tmp_row
elif res_tpye['res_type'] == "volume":
write_row = tmp_row + 1
month_sheet.write(write_row,write_col,res_tpye['rate'])
write_col += 1
month = int(month_report["month"].split('-')[1])
if month == 3:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,write_col,'=SUM(C' + str(index_row+1) + ':E' + str(index_row+1) + ')')
write_col += 1
elif month == 6:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,write_col,'=SUM(G' + str(index_row+1) + ':I' + str(index_row+1) + ')')
month_sheet.write(index_row,write_col+1,'=SUM(F' + str(index_row+1) + '+J' + str(index_row+1) + ')')
write_col += 2
elif month == 9:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,write_col,'=SUM(L' + str(index_row+1) + ':N' + str(index_row+1) + ')')
write_col += 1
elif month == 12:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,write_col,'=SUM(P' + str(index_row+1) + ':R' + str(index_row+1) + ')')
month_sheet.write(index_row,write_col+1,'=SUM(O' + str(index_row+1) + '+S' + str(index_row+1) + ')')
month_sheet.write(index_row,write_col+2,'=SUM(K' + str(index_row+1) + '+T' + str(index_row+1) + ')')
write_col += 3
#处理后面的年统计和季度统计
for month in range(1,13):
if month == 3:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,5,'=SUM(C' + str(index_row+1) + ':E' + str(index_row+1) + ')')
elif month == 6:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,9,'=SUM(G' + str(index_row+1) + ':I' + str(index_row+1) + ')')
month_sheet.write(index_row,10,'=SUM(F' + str(index_row+1) + '+J' + str(index_row+1) + ')')
elif month == 9:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,14,'=SUM(L' + str(index_row+1) + ':N' + str(index_row+1) + ')')
elif month == 12:
for index in range(resource_total_rows-1):
index_row = tmp_row + index
month_sheet.write(index_row,18,'=SUM(P' + str(index_row+1) + ':R' + str(index_row+1) + ')')
month_sheet.write(index_row,19,'=SUM(O' + str(index_row+1) + '+S' + str(index_row+1) + ')')
month_sheet.write(index_row,20,'=SUM(K' + str(index_row+1) + '+T' + str(index_row+1) + ')')
month_sheet.write_array_formula('C' + str(tmp_row + resource_total_rows) + ':U' + str(tmp_row + resource_total_rows ),
'{=C' + str(tmp_row + 1) + ':U' + str(tmp_row + 1) + '+' \
+ 'C' + str(tmp_row + resource_total_rows - 1) + ':U' + str(tmp_row + resource_total_rows - 1) + '}')
#跳过资源种类数目和合计的行
row = row + resource_total_rows
#部门之间中间隔一行
row += 1
month_sheet.print_area(0,0,row,len(head)-1)
month_sheet.fit_to_pages(1,1)
month_sheet.freeze_panes(0,1)
month_sheet.hide_zero()
month_sheet.set_column('F:F',None,None,{'hidden':1})
month_sheet.set_column('J:J',None,None,{'hidden':1})
month_sheet.set_column('K:K',None,None,{'hidden':1})
month_sheet.set_column('O:O',None,None,{'hidden':1})
month_sheet.set_column('S:S',None,None,{'hidden':1})
month_sheet.set_column('T:T',None,None,{'hidden':1})
month_sheet.set_column('V:XFD',None,None,{'hidden':1})
workbook.close()
output.seek(0)
response = HttpResponse(output.read())
response['Content-type']="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
response['Content-Disposition'] = "attachment; filename=" + str(billing_month) +"-report.xlsx"
return response
class TrendsView(django.views.generic.TemplateView):
def get(self, request, *args, **kwargs):
tenant_id = request.GET.get("tenant_id", request.user.tenant_id)
time_series = request.GET.get("time_series", "month")
try:
trends = api.cloudkittyclient(self.request).billings.get_consumer_trends(time_series,
12,
get_tenant_id(self.request))
except Exception:
trends = {}
exceptions.handle(request,_("Unable to retrieve trend data"))
# convert time and cost to x and y
for trend in trends["consumerTrends"]:
if time_series == u'month':
trend.update(x=time.strftime('%Y-%m-%dT%H:%M:%S%Z',time.strptime(trend.pop("time"),"%Y-%m")),y=trend.pop("cost"))
elif time_series == u'day':
trend.update(x=time.strftime('%Y-%m-%dT%H:%M:%S%Z',time.strptime(trend.pop("time"),"%Y-%m-%d")),y=trend.pop("cost"))
ret = {'series': [{
'name': 'admin',
'unit': 'CNY',
'time_series': time_series,
'data': trends["consumerTrends"]
}],
'settings': {
'verbose_date': False
}}
return HttpResponse(json.dumps(ret), content_type='application/json')
def get_month(request):
try:
month = request.GET.get("month", "%s-%s" % (date.today().year, date.today().month))
return month
except Exception:
return None
def get_tenant_id(request):
return request.GET.get("tenant_id", request.user.tenant_id)
def get_tenant_list(request):
return sorted(request.user.authorized_tenants, reverse=False, key=lambda x: getattr(x, "sortNumber", 0))
def last_12_months():
def back_months(dt, months):
month = (dt.month - months) or 12
year = dt.year - month / 12
return dt.replace(year=year, month=month, day=1)
date = datetime.today()
date_choices = [date]
for i in range(1, 12):
date = back_months(date, 1)
date_choices.append(date)
return date_choices
| FNST-OpenStack/cloudkitty-dashboard | cloudkittydashboard/dashboards/project/billing_overview/views.py | Python | apache-2.0 | 14,849 |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.core.optimizers import MetaOptimizer
class SubsamplingStep(MetaOptimizer):
"""
The subsampling-step meta optimizer randomly samples a subset of batch instances to calculate
the optimization step of another optimizer.
"""
def __init__(self, optimizer, fraction=0.1, scope='subsampling-step', summary_labels=()):
"""
Creates a new subsampling-step meta optimizer instance.
Args:
optimizer: The optimizer which is modified by this meta optimizer.
fraction: The fraction of instances of the batch to subsample.
"""
assert isinstance(fraction, float) and fraction > 0.0
self.fraction = fraction
super(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
def tf_step(
self,
time,
variables,
arguments,
**kwargs
):
"""
Creates the TensorFlow operations for performing an optimization step.
Args:
time: Time tensor.
variables: List of variables to optimize.
arguments: Dict of arguments for callables, like fn_loss.
**kwargs: Additional arguments passed on to the internal optimizer.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
# Get some (batched) argument to determine batch size.
arguments_iter = iter(arguments.values())
some_argument = next(arguments_iter)
try:
while not isinstance(some_argument, tf.Tensor) or util.rank(some_argument) == 0:
if isinstance(some_argument, dict):
if some_argument:
arguments_iter = iter(some_argument.values())
some_argument = next(arguments_iter)
elif isinstance(some_argument, list):
if some_argument:
arguments_iter = iter(some_argument)
some_argument = next(arguments_iter)
elif some_argument is None or util.rank(some_argument) == 0:
# Non-batched argument
some_argument = next(arguments_iter)
else:
raise TensorForceError("Invalid argument type.")
except StopIteration:
raise TensorForceError("Invalid argument type.")
batch_size = tf.shape(input=some_argument)[0]
num_samples = tf.cast(
x=(self.fraction * tf.cast(x=batch_size, dtype=util.tf_dtype('float'))),
dtype=util.tf_dtype('int')
)
num_samples = tf.maximum(x=num_samples, y=1)
indices = tf.random_uniform(shape=(num_samples,), maxval=batch_size, dtype=tf.int32)
subsampled_arguments = util.map_tensors(
fn=(lambda arg: arg if util.rank(arg) == 0 else tf.gather(params=arg, indices=indices)),
tensors=arguments
)
return self.optimizer.step(
time=time,
variables=variables,
arguments=subsampled_arguments,
**kwargs
)
| lefnire/tensorforce | tensorforce/core/optimizers/subsampling_step.py | Python | apache-2.0 | 4,026 |
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.http.servlet import RestServlet
from ._base import client_v2_pattern, parse_json_dict_from_request
class TokenRefreshRestServlet(RestServlet):
"""
Exchanges refresh tokens for a pair of an access token and a new refresh
token.
"""
PATTERN = client_v2_pattern("/tokenrefresh")
def __init__(self, hs):
super(TokenRefreshRestServlet, self).__init__()
self.hs = hs
self.store = hs.get_datastore()
@defer.inlineCallbacks
def on_POST(self, request):
body = parse_json_dict_from_request(request)
try:
old_refresh_token = body["refresh_token"]
auth_handler = self.hs.get_handlers().auth_handler
(user_id, new_refresh_token) = yield self.store.exchange_refresh_token(
old_refresh_token, auth_handler.generate_refresh_token)
new_access_token = yield auth_handler.issue_access_token(user_id)
defer.returnValue((200, {
"access_token": new_access_token,
"refresh_token": new_refresh_token,
}))
except KeyError:
raise SynapseError(400, "Missing required key 'refresh_token'.")
except StoreError:
raise AuthError(403, "Did not recognize refresh token")
def register_servlets(hs, http_server):
TokenRefreshRestServlet(hs).register(http_server)
| iot-factory/synapse | synapse/rest/client/v2_alpha/tokenrefresh.py | Python | apache-2.0 | 2,090 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.sahara import data_source
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
data_source_template = """
heat_template_version: 2015-10-15
resources:
data-source:
type: OS::Sahara::DataSource
properties:
name: my-ds
type: swift
url: swift://container.sahara/text
credentials:
user: admin
password: swordfish
"""
class SaharaDataSourceTest(common.HeatTestCase):
def setUp(self):
super(SaharaDataSourceTest, self).setUp()
t = template_format.parse(data_source_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['data-source']
self.client = mock.Mock()
self.patchobject(data_source.DataSource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
ds = data_source.DataSource(name, snippet, stack)
value = mock.MagicMock(id='12345')
self.client.data_sources.create.return_value = value
scheduler.TaskRunner(ds.create)()
return ds
def test_create(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
args = self.client.data_sources.create.call_args[1]
expected_args = {
'name': 'my-ds',
'description': '',
'data_source_type': 'swift',
'url': 'swift://container.sahara/text',
'credential_user': 'admin',
'credential_pass': 'swordfish'
}
self.assertEqual(expected_args, args)
self.assertEqual('12345', ds.resource_id)
expected_state = (ds.CREATE, ds.COMPLETE)
self.assertEqual(expected_state, ds.state)
def test_update(self):
ds = self._create_resource('data-source', self.rsrc_defn,
self.stack)
props = self.stack.t.t['resources']['data-source']['properties'].copy()
props['type'] = 'hdfs'
props['url'] = 'my/path'
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
scheduler.TaskRunner(ds.update, self.rsrc_defn)()
data = {
'name': 'my-ds',
'description': '',
'type': 'hdfs',
'url': 'my/path',
'credentials': {
'user': 'admin',
'password': 'swordfish'
}
}
self.client.data_sources.update.assert_called_once_with(
'12345', data)
self.assertEqual((ds.UPDATE, ds.COMPLETE), ds.state)
def test_show_attribute(self):
ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
value = mock.MagicMock()
value.to_dict.return_value = {'ds': 'info'}
self.client.data_sources.get.return_value = value
self.assertEqual({'ds': 'info'}, ds.FnGetAtt('show'))
def test_validate_password_without_user(self):
props = self.stack.t.t['resources']['data-source']['properties'].copy()
del props['credentials']['user']
self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
ds = data_source.DataSource('data-source', self.rsrc_defn, self.stack)
ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
error_msg = ('Property error: resources.data-source.properties.'
'credentials: Property user not assigned')
self.assertEqual(error_msg, str(ex))
| openstack/heat | heat/tests/openstack/sahara/test_data_source.py | Python | apache-2.0 | 4,214 |
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
""" Typically called by Cloud Scheduler with recipe JSON payload.
Sample JSON POST payload:
{
"setup":{
"id":"", #string - Cloud Project ID for billing.
"auth":{
"service":{}, #dict - Optional Cloud Service JSON credentials when task uses service.
"user":{} #dict - Optional Cloud User JSON credentials when task uses user.
}
},
"tasks":[ # list of recipe tasks to execute, see StarThinker scripts for examples.
{ "hello":{
"auth":"user", # not used in demo, for display purposes only.
"say":"Hello World"
}}
]
}
Documentation: https://github.com/google/starthinker/blob/master/tutorials/deploy_cloudfunction.md
"""
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
def run(request):
recipe = request.get_json(force=True)
execute(Configuration(recipe=recipe, verbose=True), recipe.get('tasks', []), force=True)
return 'DONE'
| google/starthinker | cloud_function/main.py | Python | apache-2.0 | 1,752 |
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .linkstate import LinkState # noqa
from .node.local_router_id import LocalRouterID # noqa
from .node.name import NodeName # noqa
from .node.isisarea import ISISArea # noqa
from .node.sr_capabilities import SRCapabilities # noqa
from .node.sr_algorithm import SRAlgorithm # noqa
from .node.node_msd import NodeMSD # noqa
from .node.nodeflags import NodeFlags # noqa
from .node.opa_node_attr import OpaNodeAttr # noqa
from .node.sid_or_label import SIDorLabel # noqa
from .node.srlb import SRLB # noqa
from .link.admingroup import AdminGroup # noqa
from .link.remote_router_id import RemoteRouterID # noqa
from .link.max_bw import MaxBandwidth # noqa
from .link.max_rsv_bw import MaxResvBandwidth # noqa
from .link.unsrv_bw import UnrsvBandwidth # noqa
from .link.te_metric import TeMetric # noqa
from .link.link_name import LinkName # noqa
from .link.igp_metric import IGPMetric # noqa
from .link.adj_seg_id import AdjSegID # noqa
from .link.link_identifiers import LinkIdentifiers # noqa
from .link.link_msd import LinkMSD # noqa
from .link.lan_adj_sid import LanAdjSegID # noqa
from .link.srlg import SRLGList # noqa
from .link.mplsmask import MplsMask # noqa
from .link.protection_type import ProtectionType # noqa
from .link.opa_link_attr import OpaLinkAttr # noqa
from .link.peer_node_sid import PeerNodeSID # noqa
from .link.peer_adj_sid import PeerAdjSID # noqa
from .link.peer_set_sid import PeerSetSID # noqa
from .link.unidirect_link_delay import UnidirectLinkDelay # noqa
from .link.min_max_link_delay import MinMaxUnidirectLinkDelay # noqa
from .link.unidirect_delay_var import UnidirectDelayVar # noqa
from .link.unidirect_packet_loss import UnidirectPacketLoss # noqa
from .link.unidirect_residual_bw import UnidirectResidualBw # noqa
from .link.unidirect_avail_bw import UnidirectAvailBw # noqa
from .link.unidirect_bw_util import UnidirectBwUtil # noqa
from .prefix.prefix_metric import PrefixMetric # noqa
from .prefix.prefix_sid import PrefixSID # noqa
from .prefix.prefix_igp_attr import PrefixIGPAttr # noqa
from .prefix.src_router_id import SrcRouterID # noqa
from .prefix.igpflags import IGPFlags # noqa
from .prefix.igp_route_tag_list import IGPRouteTagList # noqa
from .prefix.ext_igp_route_tag_list import ExtIGPRouteTagList # noqa
from .prefix.ospf_forward_addr import OspfForwardingAddr # noqa
| meidli/yabgp | yabgp/message/attribute/linkstate/__init__.py | Python | apache-2.0 | 3,042 |
"""Support for the Italian train system using ViaggiaTreno API."""
import asyncio
import logging
import time
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, HTTP_OK, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by ViaggiaTreno Data"
VIAGGIATRENO_ENDPOINT = (
"http://www.viaggiatreno.it/viaggiatrenonew/"
"resteasy/viaggiatreno/andamentoTreno/"
"{station_id}/{train_id}/{timestamp}"
)
REQUEST_TIMEOUT = 5 # seconds
ICON = "mdi:train"
MONITORED_INFO = [
"categoria",
"compOrarioArrivoZeroEffettivo",
"compOrarioPartenzaZeroEffettivo",
"destinazione",
"numeroTreno",
"orarioArrivo",
"orarioPartenza",
"origine",
"subTitle",
]
DEFAULT_NAME = "Train {}"
CONF_NAME = "train_name"
CONF_STATION_ID = "station_id"
CONF_STATION_NAME = "station_name"
CONF_TRAIN_ID = "train_id"
ARRIVED_STRING = "Arrived"
CANCELLED_STRING = "Cancelled"
NOT_DEPARTED_STRING = "Not departed yet"
NO_INFORMATION_STRING = "No information for this train now"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TRAIN_ID): cv.string,
vol.Required(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ViaggiaTreno platform."""
train_id = config.get(CONF_TRAIN_ID)
station_id = config.get(CONF_STATION_ID)
if not (name := config.get(CONF_NAME)):
name = DEFAULT_NAME.format(train_id)
async_add_entities([ViaggiaTrenoSensor(train_id, station_id, name)])
async def async_http_request(hass, uri):
"""Perform actual request."""
try:
session = hass.helpers.aiohttp_client.async_get_clientsession(hass)
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await session.get(uri)
if req.status != HTTP_OK:
return {"error": req.status}
json_response = await req.json()
return json_response
except (asyncio.TimeoutError, aiohttp.ClientError) as exc:
_LOGGER.error("Cannot connect to ViaggiaTreno API endpoint: %s", exc)
except ValueError:
_LOGGER.error("Received non-JSON data from ViaggiaTreno API endpoint")
class ViaggiaTrenoSensor(SensorEntity):
"""Implementation of a ViaggiaTreno sensor."""
def __init__(self, train_id, station_id, name):
"""Initialize the sensor."""
self._state = None
self._attributes = {}
self._unit = ""
self._icon = ICON
self._station_id = station_id
self._name = name
self.uri = VIAGGIATRENO_ENDPOINT.format(
station_id=station_id, train_id=train_id, timestamp=int(time.time()) * 1000
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def extra_state_attributes(self):
"""Return extra attributes."""
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
return self._attributes
@staticmethod
def has_departed(data):
"""Check if the train has actually departed."""
try:
first_station = data["fermate"][0]
if data["oraUltimoRilevamento"] or first_station["effettiva"]:
return True
except ValueError:
_LOGGER.error("Cannot fetch first station: %s", data)
return False
@staticmethod
def has_arrived(data):
"""Check if the train has already arrived."""
last_station = data["fermate"][-1]
if not last_station["effettiva"]:
return False
return True
@staticmethod
def is_cancelled(data):
"""Check if the train is cancelled."""
if data["tipoTreno"] == "ST" and data["provvedimento"] == 1:
return True
return False
async def async_update(self):
"""Update state."""
uri = self.uri
res = await async_http_request(self.hass, uri)
if res.get("error", ""):
if res["error"] == 204:
self._state = NO_INFORMATION_STRING
self._unit = ""
else:
self._state = "Error: {}".format(res["error"])
self._unit = ""
else:
for i in MONITORED_INFO:
self._attributes[i] = res[i]
if self.is_cancelled(res):
self._state = CANCELLED_STRING
self._icon = "mdi:cancel"
self._unit = ""
elif not self.has_departed(res):
self._state = NOT_DEPARTED_STRING
self._unit = ""
elif self.has_arrived(res):
self._state = ARRIVED_STRING
self._unit = ""
else:
self._state = res.get("ritardo")
self._unit = TIME_MINUTES
self._icon = ICON
| lukas-hetzenecker/home-assistant | homeassistant/components/viaggiatreno/sensor.py | Python | apache-2.0 | 5,443 |
import urllib, urllib2, sys, httplib
url = "/MELA/REST_WS"
HOST_IP="109.231.126.217:8180"
#HOST_IP="localhost:8180"
if __name__=='__main__':
connection = httplib.HTTPConnection(HOST_IP)
description_file = open("./costTest.xml", "r")
body_content = description_file.read()
headers={
'Content-Type':'application/xml; charset=utf-8',
'Accept':'application/json, multipart/related'
}
connection.request('PUT', url+'/service', body=body_content,headers=headers,)
result = connection.getresponse()
print result.read()
| tuwiendsg/MELA | MELA-Extensions/MELA-ComplexCostEvaluationService/tests/mela-clients/submitServiceDescription.py | Python | apache-2.0 | 589 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: yyg
@Create: 2016MMDD
@LastUpdate: 2016-12-15 HH:MM:SS
@Version: 0.0
"""
from json import load
from logging import (Formatter, _defaultFormatter, exception,
getLogger, FileHandler, basicConfig, StreamHandler)
from cloghandler import ConcurrentRotatingFileHandler
from params import (LOG_CONF_FILE, LOG_LVL, LOGGER_NAME,
LOG_FILE, LOG_DAT_FMT, LOG_FMT)
class LaserjetLogger(object):
"""
Compatible to python 2.6+
"""
def __init__(self):
self.fmt = LOG_FMT
self.datefmt = LOG_DAT_FMT
self._start()
def _start(self):
logger = getLogger(LOGGER_NAME)
log_handler = ConcurrentRotatingFileHandler(LOG_FILE)
log_formatter = Formatter(self.fmt, self.datefmt)
log_handler.setFormatter(log_formatter)
console_handler = StreamHandler()
console_handler.setFormatter(log_formatter)
logger.setLevel(LOG_LVL)
logger.addHandler(log_handler)
logger.addHandler(console_handler)
logger.info("Logger activated")
def print_func(anything_str):
log = getLogger(LOGGER_NAME)
log.info(anything_str)
if __name__ == "__main__":
logger = LaserjetLogger()
test_pool = Pool()
for i in range(5):
test_pool.apply_async(print_func, args=(i,))
test_pool.close()
test_pool.join()
| hipnusleo/laserjet | lib/core/loggers.py | Python | apache-2.0 | 1,506 |
#!/usr/bin/env python
# Written against python 3.3.1
# Matasano Problem 3
# The hex encoded string:
# 1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736
# has been XOR'd against a single character. Find the key, decrypt
# the message.
# Some of the 'magic' in this comes from a college crypto course
# That course used Stinson's Cryptography book, 3rd edition
from prob1 import hexToRaw, rawToHexLUT
from prob2 import hex_xor
import string
letterFrequency = {}
letterFrequency['A'] = .082;
letterFrequency['B'] = .015;
letterFrequency['C'] = .028;
letterFrequency['D'] = .043;
letterFrequency['E'] = .127;
letterFrequency['F'] = .022;
letterFrequency['G'] = .020;
letterFrequency['H'] = .061;
letterFrequency['I'] = .070;
letterFrequency['J'] = .002;
letterFrequency['K'] = .008;
letterFrequency['L'] = .040;
letterFrequency['M'] = .024;
letterFrequency['N'] = .067;
letterFrequency['O'] = .075;
letterFrequency['P'] = .019;
letterFrequency['Q'] = .001;
letterFrequency['R'] = .060;
letterFrequency['S'] = .063;
letterFrequency['T'] = .091;
letterFrequency['U'] = .028;
letterFrequency['V'] = .010;
letterFrequency['W'] = .023;
letterFrequency['X'] = .001;
letterFrequency['Y'] = .020;
letterFrequency['Z'] = .001;
letterFrequency[' '] = .200;
# See page 35, Stinson
def calculateMG(plain):
counts = [];
for i in range(256):
counts.append(0);
for i in range(len(plain)):
if (plain[i] < 128):
counts[ord(chr(plain[i]).upper())] += 1;
result = 0.0;
for i in string.ascii_uppercase:
result += letterFrequency[i]*counts[ord(i)];
result += letterFrequency[' '] * counts[ord(' ')];
result /= len(plain);
return result;
def tryKey(cipher, key):
fullkey = key * len(cipher);
fullkey = fullkey[:len(cipher)];
potential_plain = hex_xor(cipher, fullkey);
return calculateMG(hexToRaw(potential_plain)), potential_plain;
def findGoodKeys(cipher):
for i in range(256):
mg, plain = tryKey(cipher, rawToHexLUT[i]);
#print(str(i) + ": " + str(mg));
if (mg > .050):
print("potential key: 0x" + rawToHexLUT[i]);
print("Potential hex(plain): " + str(plain).lstrip("b'").rstrip("'"));
print("potential plaintext: " + str(hexToRaw(str(plain).lstrip("b'").rstrip("'"))).lstrip("b'").rstrip("'"));
if __name__ == "__main__":
cip = b'1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736';
findGoodKeys(cip);
| reschly/cryptopals | prob3.py | Python | apache-2.0 | 2,592 |
#!/usr/bin/env python3
"""
./e09asynctwostage.py http://camlistore.org 1 6
Found 10 urls
http://camlistore.org/ frequencies: [('camlistore', 13), ...]
...
First integer arg is depth, second is minimum word count.
"""
import re
from sys import argv
import asyncio
from e01extract import canonicalize
from e04twostage import print_popular_words
from e06asyncextract import extract_async
@asyncio.coroutine
def wordcount_async(data, word_length):
counts = {}
for match in re.finditer('\w{%d,100}' % word_length, data):
word = match.group(0).lower()
counts[word] = counts.get(word, 0) + 1
return counts
@asyncio.coroutine
def extract_count_async(url, word_length):
_, data, found_urls = yield from extract_async(url)
top_word = yield from wordcount_async(data, word_length)
return url, top_word, found_urls
@asyncio.coroutine
def twostage_async(to_fetch, seen_urls, word_length):
futures, results = [], []
for url in to_fetch:
if url in seen_urls: continue
seen_urls.add(url)
futures.append(extract_count_async(url, word_length))
for future in asyncio.as_completed(futures):
try:
results.append((yield from future))
except Exception:
continue
return results
@asyncio.coroutine
def crawl_async(start_url, max_depth, word_length):
seen_urls = set()
to_fetch = [canonicalize(start_url)]
results = []
for depth in range(max_depth + 1):
batch = yield from twostage_async(to_fetch, seen_urls, word_length)
to_fetch = []
for url, data, found_urls in batch:
results.append((url, data))
to_fetch.extend(found_urls)
return results
def main():
# Bridge the gap between sync and async
future = asyncio.Task(crawl_async(argv[1], int(argv[2]), int(argv[3])))
loop = asyncio.get_event_loop()
loop.run_until_complete(future)
loop.close()
result = future.result()
print_popular_words(result)
if __name__ == '__main__':
main()
| bslatkin/pycon2014 | e09asynctwostage.py | Python | apache-2.0 | 2,046 |
#!/usr/bin/python
# Copyright 2014 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# this script will create a set of metrics at the endpoint specified as the
# program parameter
#
#
import json
import random
import requests
import string
import sys
import time
MOLD = {"name": "name1",
"timestamp": '2014-12-01',
"value": 100
}
MOLD_DIMENSIONS = {"key1": None}
def setup_metrics(argv):
for a in range(100):
MOLD_DIMENSIONS['key1'] = (
''.join(random.sample(string.ascii_uppercase * 6, 6)))
MOLD_DIMENSIONS['key2'] = (
''.join(random.sample(string.ascii_uppercase * 6, 6)))
MOLD_DIMENSIONS['key_' + str(a)] = (
''.join(random.sample(string.ascii_uppercase * 6, 6)))
"""
import hashlib
key_str = json.dumps(MOLD_DIMENSIONS, sort_keys=True,
indent=None,
separators=(',', ':'))
key = hashlib.md5(key_str).hexdigest()
MOLD['dimensions_hash'] = key
"""
MOLD['dimensions'] = MOLD_DIMENSIONS
print('starting round %s' % a)
# Generate unique 100 metrics
for i in range(100):
MOLD['name'] = ''.join(random.sample(string.ascii_uppercase * 6,
6))
for j in range(10):
MOLD['value'] = round((i + 1) * j * random.random(), 2)
the_time = time.time()
# single messages
for k in range(10):
factor = round(random.random(), 2) * 100
MOLD['timestamp'] = the_time + k * 50000 * factor
MOLD['value'] = i * j * k * random.random()
res = requests.post(argv[1], data=json.dumps(MOLD))
if res.status_code != 201 and res.status_code != 204:
print(json.dumps(MOLD))
exit(0)
# multiple messages
for k in range(3):
msg = "["
factor = round(random.random(), 2) * 100
MOLD['timestamp'] = the_time + k * 50000 * factor
MOLD['value'] = i * j * k * random.random()
msg += json.dumps(MOLD)
for l in range(9):
factor = round(random.random(), 2) * 100
MOLD['timestamp'] = the_time + k * 50000 * factor
MOLD['value'] = i * j * k * random.random()
msg += ',' + json.dumps(MOLD)
msg += "]"
res = requests.post(argv[1], data=msg)
if res.status_code != 201 and res.status_code != 204:
print(json.dumps(MOLD))
exit(0)
del MOLD_DIMENSIONS['key_' + str(a)]
print('round finished %s' % a)
if __name__ == '__main__':
if len(sys.argv) == 2:
setup_metrics(sys.argv)
else:
print('Usage: setup_metrics endpoint. For example:')
print(' setup_metrics http://host:9000/data_2015')
| litong01/python-monasca | kiloeyes/tests/setup_metrics.py | Python | apache-2.0 | 3,665 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numbers
import shutil
from tempfile import mkdtemp
import mock
import operator
import time
import unittest
import socket
import os
import errno
import itertools
import random
import eventlet
from collections import defaultdict
from datetime import datetime
import six
from six.moves import urllib
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
from swift.common.swob import Request
from swift.container import reconciler
from swift.container.server import gen_resp_headers, ContainerController
from swift.common.direct_client import ClientException
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import split_path, Timestamp, encode_timestamps, mkdirs
from test.debug_logger import debug_logger
from test.unit import FakeRing, fake_http_connect, patch_policies, \
DEFAULT_TEST_EC_TYPE, make_timestamp_iter
from test.unit.common.middleware import helpers
def timestamp_to_last_modified(timestamp):
return datetime.utcfromtimestamp(
float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f')
def container_resp_headers(**kwargs):
return HeaderKeyDict(gen_resp_headers(kwargs))
class FakeStoragePolicySwift(object):
def __init__(self):
self.storage_policy = defaultdict(helpers.FakeSwift)
self._mock_oldest_spi_map = {}
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.storage_policy[None], name)
def __call__(self, env, start_response):
method = env['REQUEST_METHOD']
path = env['PATH_INFO']
_, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
rest_with_last=True)
if not obj:
policy_index = None
else:
policy_index = self._mock_oldest_spi_map.get(cont, 0)
# allow backend policy override
if 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX' in env:
policy_index = int(env['HTTP_X_BACKEND_STORAGE_POLICY_INDEX'])
try:
return self.storage_policy[policy_index].__call__(
env, start_response)
except KeyError:
pass
if method == 'PUT':
resp_class = swob.HTTPCreated
else:
resp_class = swob.HTTPNotFound
self.storage_policy[policy_index].register(
method, path, resp_class, {}, '')
return self.storage_policy[policy_index].__call__(
env, start_response)
class FakeInternalClient(reconciler.InternalClient):
def __init__(self, listings=None):
self.app = FakeStoragePolicySwift()
self.user_agent = 'fake-internal-client'
self.request_tries = 1
self.use_replication_network = True
self.parse(listings)
self.container_ring = FakeRing()
def parse(self, listings):
listings = listings or {}
self.accounts = defaultdict(lambda: defaultdict(list))
for item, timestamp in listings.items():
# XXX this interface is stupid
if isinstance(timestamp, tuple):
timestamp, content_type = timestamp
else:
timestamp, content_type = timestamp, 'application/x-put'
storage_policy_index, path = item
if six.PY2 and isinstance(path, six.text_type):
path = path.encode('utf-8')
account, container_name, obj_name = split_path(
path, 0, 3, rest_with_last=True)
self.accounts[account][container_name].append(
(obj_name, storage_policy_index, timestamp, content_type))
for account_name, containers in self.accounts.items():
for con in containers:
self.accounts[account_name][con].sort(key=lambda t: t[0])
for account, containers in self.accounts.items():
account_listing_data = []
account_path = '/v1/%s' % account
for container, objects in containers.items():
container_path = account_path + '/' + container
container_listing_data = []
for entry in objects:
(obj_name, storage_policy_index,
timestamp, content_type) = entry
if storage_policy_index is None and not obj_name:
# empty container
continue
obj_path = swob.str_to_wsgi(
container_path + '/' + obj_name)
ts = Timestamp(timestamp)
headers = {'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal}
# register object response
self.app.storage_policy[storage_policy_index].register(
'GET', obj_path, swob.HTTPOk, headers)
self.app.storage_policy[storage_policy_index].register(
'DELETE', obj_path, swob.HTTPNoContent, {})
# container listing entry
last_modified = timestamp_to_last_modified(timestamp)
# some tests setup mock listings using floats, some use
# strings, so normalize here
if isinstance(timestamp, numbers.Number):
timestamp = '%f' % timestamp
if six.PY2:
obj_name = obj_name.decode('utf-8')
timestamp = timestamp.decode('utf-8')
obj_data = {
'bytes': 0,
# listing data is unicode
'name': obj_name,
'last_modified': last_modified,
'hash': timestamp,
'content_type': content_type,
}
container_listing_data.append(obj_data)
container_listing_data.sort(key=operator.itemgetter('name'))
# register container listing response
container_headers = {}
container_qry_string = helpers.normalize_query_string(
'?format=json&marker=&end_marker=&prefix=')
self.app.register('GET', container_path + container_qry_string,
swob.HTTPOk, container_headers,
json.dumps(container_listing_data))
if container_listing_data:
obj_name = container_listing_data[-1]['name']
# client should quote and encode marker
end_qry_string = helpers.normalize_query_string(
'?format=json&marker=%s&end_marker=&prefix=' % (
urllib.parse.quote(obj_name.encode('utf-8'))))
self.app.register('GET', container_path + end_qry_string,
swob.HTTPOk, container_headers,
json.dumps([]))
self.app.register('DELETE', container_path,
swob.HTTPConflict, {}, '')
# simple account listing entry
container_data = {'name': container}
account_listing_data.append(container_data)
# register account response
account_listing_data.sort(key=operator.itemgetter('name'))
account_headers = {}
account_qry_string = '?format=json&marker=&end_marker=&prefix='
self.app.register('GET', account_path + account_qry_string,
swob.HTTPOk, account_headers,
json.dumps(account_listing_data))
end_qry_string = '?format=json&marker=%s&end_marker=&prefix=' % (
urllib.parse.quote(account_listing_data[-1]['name']))
self.app.register('GET', account_path + end_qry_string,
swob.HTTPOk, account_headers,
json.dumps([]))
class TestReconcilerUtils(unittest.TestCase):
def setUp(self):
self.fake_ring = FakeRing()
reconciler.direct_get_container_policy_index.reset()
self.tempdir = mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def test_parse_raw_obj(self):
got = reconciler.parse_raw_obj({
'name': "2:/AUTH_bob/con/obj",
'hash': Timestamp(2017551.49350).internal,
'last_modified': timestamp_to_last_modified(2017551.49352),
'content_type': 'application/x-delete',
})
self.assertEqual(got['q_policy_index'], 2)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 2017551.49350)
self.assertEqual(got['q_record'], 2017551.49352)
self.assertEqual(got['q_op'], 'DELETE')
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# the 'hash' field in object listing has the raw 'created_at' value
# which could be a composite of timestamps
timestamp_str = encode_timestamps(Timestamp(1234.20190),
Timestamp(1245.20190),
Timestamp(1256.20190),
explicit=True)
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': timestamp_str,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# negative test
obj_info = {
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
}
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'foo'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'appliation/x-post'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'bogus'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': '-1:/AUTH_test/container'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'asdf:/AUTH_test/c/obj'})
self.assertRaises(KeyError, reconciler.parse_raw_obj,
{'name': '0:/AUTH_test/c/obj',
'content_type': 'application/x-put'})
def test_get_container_policy_index(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
for permutation in itertools.permutations((0, 1, 2)):
reconciler.direct_get_container_policy_index.reset()
resp_headers = [stub_resp_headers[i] for i in permutation]
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
test_values = [(info['x-storage-policy-index'],
info['x-backend-status-changed-at']) for
info in resp_headers]
self.assertEqual(oldest_spi, 0,
"oldest policy index wrong "
"for permutation %r" % test_values)
def test_get_container_policy_index_with_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_change_at=next(ts),
storage_policy_index=2,
),
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=1,
),
# old timestamp, but 500 should be ignored...
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(0).internal,
storage_policy_index=0,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_with_socket_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_with_too_many_errors(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertIsNone(oldest_spi)
def test_get_container_policy_index_for_deleted(self):
mock_path = 'swift.container.reconciler.direct_head_container'
headers = container_resp_headers(
status_changed_at=Timestamp.now().internal,
storage_policy_index=1,
)
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_for_recently_deleted(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_recreated(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# old put, no recreate
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# recently deleted
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
# recently recreated
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_split_brain(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# oldest put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# old recreate
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
# recently put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two')])
def test_get_container_policy_index_for_recently_split_recreated(self):
# verify that get_container_policy_index reaches same conclusion as a
# container server that receives all requests in chronological order
ts_iter = make_timestamp_iter()
ts = [next(ts_iter) for _ in range(8)]
# make 3 container replicas
device_dirs = [os.path.join(self.tempdir, str(i)) for i in range(3)]
for device_dir in device_dirs:
mkdirs(os.path.join(device_dir, 'sda1'))
controllers = [ContainerController(
{'devices': devices,
'mount_check': 'false',
'replication_server': 'true'})
for devices in device_dirs]
# initial PUT goes to all 3 replicas
responses = []
for controller in controllers:
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts[0].internal,
'X-Backend-Storage-Policy-Index': 0,
})
responses.append(req.get_response(controller))
self.assertEqual([resp.status_int for resp in responses],
[201, 201, 201])
# DELETE to all 3 replicas
responses = []
for controller in controllers:
req = Request.blank('/sda1/p/a/c', method='DELETE', headers={
'X-Timestamp': ts[2].internal,
})
responses.append(req.get_response(controller))
self.assertEqual([resp.status_int for resp in responses],
[204, 204, 204])
# first recreate PUT, SPI=1, goes to replicas 0 and 1
responses = []
for controller in controllers[:2]:
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts[3].internal,
'X-Backend-Storage-Policy-Index': 1,
})
responses.append(req.get_response(controller))
# all ok, PUT follows DELETE
self.assertEqual([resp.status_int for resp in responses],
[201, 201])
# second recreate PUT, SPI=2, goes to replicas 0 and 2
responses = []
for controller in [controllers[0], controllers[2]]:
req = Request.blank('/sda1/p/a/c', method='PUT', headers={
'X-Timestamp': ts[5].internal,
'X-Backend-Storage-Policy-Index': 2,
})
responses.append(req.get_response(controller))
# note: 409 from replica 0 because PUT follows previous PUT
self.assertEqual([resp.status_int for resp in responses],
[409, 201])
# now do a HEAD on all replicas
responses = []
for controller in controllers:
req = Request.blank('/sda1/p/a/c', method='HEAD')
responses.append(req.get_response(controller))
self.assertEqual([resp.status_int for resp in responses],
[204, 204, 204])
resp_headers = [resp.headers for resp in responses]
# replica 0 should be authoritative because it received all requests
self.assertEqual(ts[3].internal, resp_headers[0]['X-Put-Timestamp'])
self.assertEqual('1',
resp_headers[0]['X-Backend-Storage-Policy-Index'])
self.assertEqual(ts[3].internal, resp_headers[1]['X-Put-Timestamp'])
self.assertEqual('1',
resp_headers[1]['X-Backend-Storage-Policy-Index'])
self.assertEqual(ts[5].internal, resp_headers[2]['X-Put-Timestamp'])
self.assertEqual('2',
resp_headers[2]['X-Backend-Storage-Policy-Index'])
# now feed the headers from each replica to
# direct_get_container_policy_index
mock_path = 'swift.container.reconciler.direct_head_container'
random.shuffle(resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expect the same outcome as the authoritative replica 0
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_cache(self):
now = time.time()
ts = itertools.count(int(now))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 0)
# re-mock with errors
stub_resp_headers = [
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
with mock.patch('time.time', new=lambda: now):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# still cached
self.assertEqual(oldest_spi, 0)
# propel time forward
the_future = now + 31
with mock.patch('time.time', new=lambda: the_future):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expired
self.assertIsNone(oldest_spi)
def test_direct_delete_container_entry(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
x_timestamp = Timestamp.now()
headers = {'x-timestamp': x_timestamp.internal}
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o', headers=headers)
self.assertEqual(len(connect_args), 3)
for args in connect_args:
self.assertEqual(args['method'], 'DELETE')
self.assertEqual(args['path'], '/a/c/o')
self.assertEqual(args['headers'].get('x-timestamp'),
headers['x-timestamp'])
def test_direct_delete_container_entry_with_errors(self):
# setup mock direct_delete
mock_path = \
'swift.container.reconciler.direct_delete_container_object'
stub_resp = [
None,
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
'10.0.0.12', 6201, 'sdj', 404, 'Not Found'
),
]
mock_direct_delete = mock.MagicMock()
mock_direct_delete.side_effect = stub_resp
with mock.patch(mock_path, mock_direct_delete), \
mock.patch('eventlet.greenpool.DEBUG', False):
rv = reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o')
self.assertIsNone(rv)
self.assertEqual(len(mock_direct_delete.mock_calls), 3)
def test_add_to_reconciler_queue(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-content-type', 'x-etag')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
self.assertEqual(args['headers']['X-Content-Type'],
'application/x-delete')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_force(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
now = time.time()
with mock.patch(mock_path, fake_hc), \
mock.patch('swift.container.reconciler.time.time',
lambda: now):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT',
force=True)
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-size', 'x-content-type')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'],
Timestamp(now).internal)
self.assertEqual(args['headers']['X-Etag'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_fails(self):
mock_path = 'swift.common.direct_client.http_connect'
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(507)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT')
self.assertFalse(ret)
def test_add_to_reconciler_queue_socket_error(self):
mock_path = 'swift.common.direct_client.http_connect'
exc = socket.error(errno.ECONNREFUSED,
os.strerror(errno.ECONNREFUSED))
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(500, raise_exc=exc)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertFalse(ret)
def listing_qs(marker):
return helpers.normalize_query_string(
"?format=json&marker=%s&end_marker=&prefix=" %
urllib.parse.quote(marker.encode('utf-8')))
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
ECStoragePolicy(1, 'one', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=6, ec_nparity=2), ],
fake_ring_args=[{}, {'replicas': 8}])
class TestReconciler(unittest.TestCase):
maxDiff = None
def setUp(self):
self.logger = debug_logger()
conf = {}
self.swift = FakeInternalClient()
self.reconciler = reconciler.ContainerReconciler(
conf, logger=self.logger, swift=self.swift)
self.start_interval = int(time.time() // 3600 * 3600)
self.current_container_path = '/v1/.misplaced_objects/%d' % (
self.start_interval) + listing_qs('')
def test_concurrency_config(self):
conf = {}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.concurrency, 1)
conf = {'concurrency': '10'}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.concurrency, 10)
conf = {'concurrency': 48}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.concurrency, 48)
conf = {'concurrency': 0}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
conf = {'concurrency': '-1'}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
def test_processes_config(self):
conf = {}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.process, 0)
self.assertEqual(r.processes, 0)
conf = {'processes': '1'}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.process, 0)
self.assertEqual(r.processes, 1)
conf = {'processes': 10, 'process': '9'}
r = reconciler.ContainerReconciler(conf, self.logger, self.swift)
self.assertEqual(r.process, 9)
self.assertEqual(r.processes, 10)
conf = {'processes': -1}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
conf = {'process': -1}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
conf = {'processes': 9, 'process': 9}
self.assertRaises(ValueError, reconciler.ContainerReconciler,
conf, self.logger, self.swift)
def test_init_internal_client_log_name(self):
def _do_test_init_ic_log_name(conf, exp_internal_client_log_name):
with mock.patch(
'swift.container.reconciler.InternalClient') \
as mock_ic:
reconciler.ContainerReconciler(conf)
mock_ic.assert_called_once_with(
'/etc/swift/container-reconciler.conf',
'Swift Container Reconciler', 3,
global_conf={'log_name': exp_internal_client_log_name},
use_replication_network=True)
_do_test_init_ic_log_name({}, 'container-reconciler-ic')
_do_test_init_ic_log_name({'log_name': 'my-container-reconciler'},
'my-container-reconciler-ic')
def _mock_listing(self, objects):
self.swift.parse(objects)
self.fake_swift = self.reconciler.swift.app
def _mock_oldest_spi(self, container_oldest_spi_map):
self.fake_swift._mock_oldest_spi_map = container_oldest_spi_map
def _run_once(self):
"""
Helper method to run the reconciler once with appropriate direct-client
mocks in place.
Returns the list of direct-deleted container entries in the format
[(acc1, con1, obj1), ...]
"""
def mock_oldest_spi(ring, account, container_name):
return self.fake_swift._mock_oldest_spi_map.get(container_name, 0)
items = {
'direct_get_container_policy_index': mock_oldest_spi,
'direct_delete_container_entry': mock.DEFAULT,
}
mock_time_iter = itertools.count(self.start_interval)
with mock.patch.multiple(reconciler, **items) as mocks:
self.mock_delete_container_entry = \
mocks['direct_delete_container_entry']
with mock.patch('time.time', lambda: next(mock_time_iter)):
self.reconciler.run_once()
return [c[1][1:4] for c in
mocks['direct_delete_container_entry'].mock_calls]
def test_no_concurrency(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o2"): 3724.23456,
(1, "/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o2"): 3724.23456,
})
order_recieved = []
def fake_reconcile_object(account, container, obj, q_policy_index,
q_ts, q_op, path, **kwargs):
order_recieved.append(obj)
return True
self.reconciler._reconcile_object = fake_reconcile_object
self.assertEqual(self.reconciler.concurrency, 1) # sanity
deleted_container_entries = self._run_once()
self.assertEqual(order_recieved, ['o1', 'o2'])
# process in order recieved
self.assertEqual(deleted_container_entries, [
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1'),
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o2'),
])
def test_concurrency(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o2"): 3724.23456,
(1, "/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o2"): 3724.23456,
})
order_recieved = []
def fake_reconcile_object(account, container, obj, q_policy_index,
q_ts, q_op, path, **kwargs):
order_recieved.append(obj)
if obj == 'o1':
# o1 takes longer than o2 for some reason
for i in range(10):
eventlet.sleep(0.0)
return True
self.reconciler._reconcile_object = fake_reconcile_object
self.reconciler.concurrency = 2
deleted_container_entries = self._run_once()
self.assertEqual(order_recieved, ['o1', 'o2'])
# ... and so we finish o2 first
self.assertEqual(deleted_container_entries, [
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o2'),
('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1'),
])
def test_multi_process_should_process(self):
def mkqi(a, c, o):
"make queue item"
return {
'account': a,
'container': c,
'obj': o,
}
queue = [
mkqi('a', 'c', 'o1'),
mkqi('a', 'c', 'o2'),
mkqi('a', 'c', 'o3'),
mkqi('a', 'c', 'o4'),
]
def map_should_process(process, processes):
self.reconciler.process = process
self.reconciler.processes = processes
with mock.patch('swift.common.utils.HASH_PATH_SUFFIX',
b'endcap'), \
mock.patch('swift.common.utils.HASH_PATH_PREFIX', b''):
return [self.reconciler.should_process(q_item)
for q_item in queue]
def check_process(process, processes, expected):
should_process = map_should_process(process, processes)
try:
self.assertEqual(should_process, expected)
except AssertionError as e:
self.fail('unexpected items processed for %s/%s\n%s' % (
process, processes, e))
check_process(0, 0, [True] * 4)
check_process(0, 1, [True] * 4)
check_process(0, 2, [False, True, False, False])
check_process(1, 2, [True, False, True, True])
check_process(0, 4, [False, True, False, False])
check_process(1, 4, [True, False, False, False])
check_process(2, 4, [False] * 4) # lazy
check_process(3, 4, [False, False, True, True])
queue = [mkqi('a%s' % i, 'c%s' % i, 'o%s' % i) for i in range(1000)]
items_handled = [0] * 1000
for process in range(100):
should_process = map_should_process(process, 100)
for i, handled in enumerate(should_process):
if handled:
items_handled[i] += 1
self.assertEqual([1] * 1000, items_handled)
def test_invalid_queue_name(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/bogus"): 3618.84187,
})
deleted_container_entries = self._run_once()
# we try to find something useful
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('bogus'))])
# but only get the bogus record
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# and just leave it on the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertFalse(deleted_container_entries)
def test_invalid_queue_name_marches_onward(self):
# there's something useful there on the queue
self._mock_listing({
(None, "/.misplaced_objects/3600/00000bogus"): 3600.0000,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# we get all the queue entries we can
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and one is garbage
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# but the other is workable
self.assertEqual(self.reconciler.stats['noop_object'], 1)
# so pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_queue_name_with_policy_index_delimiter_in_name(self):
q_path = '.misplaced_objects/3600'
obj_path = "AUTH_bob/c:sneaky/o1:sneaky"
# there's something useful there on the queue
self._mock_listing({
(None, "/%s/1:/%s" % (q_path, obj_path)): 3618.84187,
(1, '/%s' % obj_path): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we find the misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_path))])
# move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path),
('DELETE', '/v1/%s' % obj_path)])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path),
('PUT', '/v1/%s' % obj_path)])
# clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries, [(
'.misplaced_objects', '3600', '1:/%s' % obj_path)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_unable_to_direct_get_oldest_storage_policy(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
})
# the reconciler gets "None" if we can't quorum the container
self._mock_oldest_spi({'c': None})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but can't really say where to go looking
self.assertEqual(self.reconciler.stats['unavailable_container'], 1)
# we don't clean up anything
self.assertEqual(self.reconciler.stats['cleanup_object'], 0)
# and we definitely should not pop_queue
self.assertFalse(deleted_container_entries)
self.assertEqual(self.reconciler.stats['retry'], 1)
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
ECStoragePolicy(2, 'two', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=6, ec_nparity=2)],
fake_ring_args=[
{'next_part_power': 1}, {}, {'next_part_power': 1}])
def test_can_reconcile_policy(self):
for policy_index, expected in ((0, False), (1, True), (2, False),
(3, False), ('apple', False),
(None, False)):
self.assertEqual(
self.reconciler.can_reconcile_policy(policy_index), expected)
@patch_policies(
[StoragePolicy(0, 'zero', is_default=True),
ECStoragePolicy(1, 'one', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=6, ec_nparity=2), ],
fake_ring_args=[{'next_part_power': 1}, {}])
def test_fail_to_move_if_ppi(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# skipped sending because policy_index 0 is in the middle of a PPI
self.assertFalse(deleted_container_entries)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(self.reconciler.stats['ppi_skip'], 1)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('PUT', '/v1/AUTH_bob/c/o1')])
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2))
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_the_other_direction(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/0:/AUTH_bob/c/o1"): 3618.84187,
(0, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('0:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[1].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '0:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_with_unicode_and_spaces(self):
# the "name" in listings and the unicode string passed to all
# functions where we call them with (account, container, obj)
obj_name = u"AUTH_bob/c \u062a/o1 \u062a"
# anytime we talk about a call made to swift for a path
if six.PY2:
obj_path = obj_name.encode('utf-8')
else:
obj_path = obj_name.encode('utf-8').decode('latin-1')
# this mock expects unquoted unicode because it handles container
# listings as well as paths
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/%s" % obj_name): 3618.84187,
(1, "/%s" % obj_name): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
# listing_qs encodes and quotes - so give it name
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_name))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
# these calls are to the real path
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path), # 2
('DELETE', '/v1/%s' % obj_path)]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path), # 1
('PUT', '/v1/%s' % obj_path)]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
self.assertEqual(
delete_headers.get('X-Backend-Storage-Policy-Index'), '1')
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
# this mock received the name, it's encoded down in buffered_http
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/%s' % obj_name)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_delete(self):
q_ts = time.time()
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): (
Timestamp(q_ts).internal, 'application/x-delete'),
# object exists in "correct" storage policy - slightly older
(0, "/AUTH_bob/c/o1"): Timestamp(q_ts - 1).internal,
})
self._mock_oldest_spi({'c': 0})
# the tombstone exists in the enqueued storage policy
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# delete it
self.assertEqual(self.reconciler.stats['delete_attempt'], 1)
self.assertEqual(self.reconciler.stats['delete_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
reconcile_headers = self.fake_swift.storage_policy[0].headers[1]
# we DELETE the object in the right place with q_ts + offset 2
self.assertEqual(reconcile_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_enqueued_for_the_correct_dest_noop(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# nothing to see here
self.assertEqual(self.reconciler.stats['noop_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# so we just pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_newer_than_queue_entry(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.234567, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.234567, offset=2))
# src object is cleaned up
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.123456, offset=1))
# and queue is popped
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_older_than_queue_entry(self):
# should be some sort of retry case
q_ts = time.time()
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
(1, '/AUTH_bob/c/o1'): q_ts - 1, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_with_slightly_newer_tombstone(self):
# should be some sort of retry case
q_ts = float(Timestamp.now())
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts, offset=2).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_server_error(self):
# should be some sort of retry case
q_ts = float(Timestamp.now())
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_fails_preflight(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the HEAD blow up
self.fake_swift.storage_policy[0].register(
'HEAD', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we did some listings...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# ...but we can't even tell whether anything's misplaced or not
self.assertEqual(self.reconciler.stats['misplaced_object'], 0)
self.assertEqual(self.reconciler.stats['unavailable_destination'], 1)
# so we don't try to do any sort of move or cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to try again later
self.assertEqual(self.reconciler.stats['retry'], 1)
self.assertEqual(self.fake_swift.storage_policy[1].calls, [])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
def test_object_move_fails_cleanup(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the DELETE blow up
self.fake_swift.storage_policy[1].register(
'DELETE', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.123457, offset=2))
# we try to cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.12346, offset=1))
# but cleanup fails!
self.assertEqual(self.reconciler.stats['cleanup_failed'], 1)
# so the queue is not popped
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to retry
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_src_object_is_forever_gone(self):
# oh boy, hate to be here - this is an oldy
q_ts = self.start_interval - self.reconciler.reclaim_age - 1
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): q_ts,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but it's gone :\
self.assertEqual(self.reconciler.stats['lost_source'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# gah, look, even if it was out there somewhere - we've been at this
# two weeks and haven't found it. We can't just keep looking forever,
# so... we're done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
# dunno if this is helpful, but FWIW we don't throw tombstones?
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['success'], 1) # lol
def test_object_move_dest_already_moved(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and wipe our hands of it
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_newer_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019 + 1, # slightly newer
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so not attempt to read is made, but we do cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and since we cleaned up the old object, so this counts as done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_older_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.38393,
(1, "/AUTH_bob/c/o1"): 36123.38393,
(0, "/AUTH_bob/c/o1"): 36123.38393 - 1, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and since our version is *newer*, we overwrite
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# ... with a q_ts + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=2))
# then clean the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with a q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=1))
# and pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '36000', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_put_fails(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest fail!
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and try to move it, but it fails
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it failed
self.assertEqual(self.reconciler.stats['copy_success'], 0)
self.assertEqual(self.reconciler.stats['copy_failed'], 1)
# ... so we don't clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['unhandled_errors'], 0)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_put_blows_up_crazy_town(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest blow up crazy town
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', blow_up, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and attempt to move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it blows up hard
self.assertEqual(self.reconciler.stats['unhandled_error'], 1)
# so we don't cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_no_such_object_no_tombstone_recent(self):
q_ts = float(Timestamp.now())
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_jeb/c/o1" % q_path): q_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is recent enough that there could easily be
# tombstones on offline nodes or something, so we'll just leave it
# here and try again later
self.assertEqual(deleted_container_entries, [])
def test_object_move_no_such_object_no_tombstone_ancient(self):
queue_ts = float(Timestamp.now()) - \
self.reconciler.reclaim_age * 1.1
container = str(int(queue_ts // 3600 * 3600))
self._mock_listing({
(
None, "/.misplaced_objects/%s/1:/AUTH_jeb/c/o1" % container
): queue_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is old enough that the tombstones, if any, have
# probably been reaped, so we'll just give up
self.assertEqual(
deleted_container_entries,
[('.misplaced_objects', container, '1:/AUTH_jeb/c/o1')])
def test_delete_old_empty_queue_containers(self):
ts = time.time() - self.reconciler.reclaim_age * 1.1
container = str(int(ts // 3600 * 3600))
older_ts = ts - 3600
older_container = str(int(older_ts // 3600 * 3600))
self._mock_listing({
(None, "/.misplaced_objects/%s/" % container): 0,
(None, "/.misplaced_objects/%s/something" % older_container): 0,
})
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('DELETE', '/v1/.misplaced_objects/%s' % container),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('something'))])
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
def test_iter_over_old_containers_in_reverse(self):
step = reconciler.MISPLACED_OBJECTS_CONTAINER_DIVISOR
now = self.start_interval
containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
containers.append(container_name)
# add some old containers too
now -= self.reconciler.reclaim_age
old_containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
old_containers.append(container_name)
containers.sort()
old_containers.sort()
all_containers = old_containers + containers
self._mock_listing(dict((
(None, "/.misplaced_objects/%s/" % container), 0
) for container in all_containers))
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
last_container = all_containers[-1]
account_listing_calls = [
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(last_container)),
]
new_container_calls = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(containers)
][1:] # current_container get's skipped the second time around...
old_container_listings = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(old_containers)
]
old_container_deletes = [
('DELETE', '/v1/.misplaced_objects/%s' % container)
for container in reversed(old_containers)
]
old_container_calls = list(itertools.chain(*zip(
old_container_listings, old_container_deletes)))
self.assertEqual(self.fake_swift.calls,
[('GET', self.current_container_path)] +
account_listing_calls + new_container_calls +
old_container_calls)
def test_error_in_iter_containers(self):
self._mock_listing({})
# make the listing return an error
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
swob.HTTPServiceUnavailable, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors, [
'Error listing containers in account '
'.misplaced_objects (Unexpected response: '
'503 Service Unavailable)'])
def test_unhandled_exception_in_reconcile(self):
self._mock_listing({})
# make the listing blow up
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
blow_up, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors,
['Unhandled Exception trying to reconcile: '])
if __name__ == '__main__':
unittest.main()
| openstack/swift | test/unit/container/test_reconciler.py | Python | apache-2.0 | 96,529 |
"""Test suite for abdt_rbranchnaming."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [XB] review names that are globally known to be bad are not accepted
# [XB] tracker names that are globally known to be bad are not accepted
# [XC] names that are known to be potential reviews aren't accepted as trackers
# [XC] names that are known to be potential trackers aren't accepted as reviews
# [XD] ReviewBranches created by the scheme have the expected attributes
# [XD] ReviewBranches created by the scheme can create expected TrackerBranches
# [XD] TrackerBranches created by the scheme have the expected attributes
# [XD] there is a 1-1 relationship between tracker params and tracker names
# -----------------------------------------------------------------------------
# Tests:
# [ A] XXX: test_A_Breathing
# [XA] check_XA_Breathing
# [XB] check_XB_globally_invalid_review_tracker_names
# [XC] check_XC_potentially_valid_review_tracker_names
# [XD] check_XD_valid_reviews
# =============================================================================
from __future__ import absolute_import
import unittest
import abdt_namingtester
import abdt_rbranchnaming
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def make_naming(self):
return abdt_rbranchnaming.Naming()
def test_A_Breathing(self):
pass
def test_XA_Breathing(self):
abdt_namingtester.check_XA_Breathing(self)
def test_XB_globally_invalid_review_tracker_names(self):
abdt_namingtester.check_XB_globally_invalid_review_tracker_names(
self, self.make_naming())
def test_XC_potentially_valid_review_tracker_names(self):
abdt_namingtester.check_XC_potentially_valid_review_tracker_names(
self, self.make_naming())
def test_XD_valid_reviews(self):
names_to_properties = {}
for properties in abdt_namingtester.VALID_REVIEW_PROPERTIES:
name = 'r/{base}/{description}'.format(
description=properties.description,
base=properties.base)
assert name not in names_to_properties
names_to_properties[name] = properties
abdt_namingtester.check_XD_valid_reviews(
self, self.make_naming(), names_to_properties)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| valhallasw/phabricator-tools | py/abd/abdt_rbranchnaming__t.py | Python | apache-2.0 | 3,352 |
import difflib
import inflect
import itertools
import logging
import netaddr
import os
import re
import toposort
import yaml
import hotcidr.state
def inflect_a(s, p=inflect.engine()):
x = p.plural(s)
if p.compare(s, x) == 'p:s':
return s
return p.a(s)
logging.basicConfig(format='%(levelname)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
class Validator(object):
logger = logging.getLogger('validation')
info = logger.warn
warn = logger.warn
error = logger.error
fatal = logger.fatal
def load(self, x):
if x not in self.files:
try:
with open(os.path.join(self.rootdir, x)) as f:
try:
self.files[x] = hotcidr.state.load(f)
except yaml.YAMLError:
self.fatal("Invalid YAML file %s" % x)
except IOError:
self.fatal("Could not read file %s" % x)
return self.files[x]
def register_check(self, f):
if f not in self.checks:
self.checks.append(f)
else:
raise Exception("Function %s is already registered" % f.__name__)
def register_checks(self, *fs):
for f in fs:
self.register_check(f)
required_map = {}
def validate(self, wrap=True):
# TODO: spawn multiple processes
l = {f: Validator.required_map[f]
if f in Validator.required_map
else set()
for f in self.checks}
for f in toposort.toposort_flatten(l, sort=False):
if wrap:
try:
f(self)
except:
self.fatal("Unexpected exception raised by %s" %
f.__name__)
raise
else:
f(self)
def __init__(self, rootdir):
self.rootdir = rootdir
self.checks = []
self.files = {}
def has_rules(g):
for i in g:
if isinstance(i, tuple):
if len(i) > 1 and 'rules' in i[1]:
yield i
elif 'rules' in i:
yield i
def requires(*a):
def decorator(f):
Validator.required_map[f] = set(a)
return f
return decorator
def load_groups(self, forced=False):
if forced or not hasattr(self, 'groups'):
groupsdir = os.path.join(self.rootdir, 'groups')
groups = os.listdir(groupsdir)
self.groups = {}
for x in groups:
if os.path.isfile(os.path.join(groupsdir, x)):
if x.endswith('.yaml'):
self.groups[x[:-5]] = self.load(os.path.join('groups', x))
def load_boxes(self, forced=False):
if forced or not hasattr(self, 'boxes'):
self.boxes = self.load('boxes.yaml')
@requires(load_groups, load_boxes)
def find_unused_groups(self):
#TODO: include groups used in 'location' field
used = set(itertools.chain(*(b['groups'] for b in self.boxes.values()
if 'groups' in b)))
for g in set(self.groups.keys()) - used:
self.info("Group %s is unused" % g)
@requires(load_groups, load_boxes)
def validate_groups(self):
used = set(itertools.chain(*(b['groups'] for b in self.boxes.values()
if 'groups' in b)))
valid_groups = set(self.groups.keys())
for g in used - valid_groups:
guess = difflib.get_close_matches(g, valid_groups)
if guess:
guess = " (Did you mean %s?)" % guess[0]
else:
guess = ""
self.fatal("%s is not defined%s" % (g, guess))
@requires(load_groups)
def validate_group_names(self):
valid_chars = set(
'abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
' ._-:/()#,@[]+=&;{}!$*'
)
for name in self.groups.keys():
if any(c not in valid_chars for c in name):
self.fatal("%s is not a valid group name" % name)
@requires(load_boxes)
def validate_aws_instance_id(self):
for name in self.boxes.keys():
if not re.match(r'^i\-[0-9a-f]{8}$', name):
self.fatal("Instance ID %s is not a valid AWS instance ID" % name)
@requires(load_groups)
def validate_aws_group_id(self):
seen = {}
for group_name, group in self.groups.items():
if 'id' in group:
name = group['id']
if not re.match(r'^sg\-[0-9a-f]{8}$', name):
self.fatal("%s has an invalid AWS group ID" % group_name)
elif name in seen:
if seen[name]:
self.fatal("%s has a duplicate AWS group ID" % seen[name])
seen[name] = False
self.fatal("%s has a duplicate AWS group ID" % group_name)
else:
seen[name] = group_name
@requires(load_groups)
def validate_protocols(self):
for group_name, group in has_rules(self.groups.iteritems()):
for rule_num, rule in enumerate(group['rules'], 1):
if 'protocol' not in rule:
self.error("Rule %d in %s is missing a protocol" %
(rule_num, group_name))
elif rule['protocol'] == '-1':
self.error("Rule %d in %s has an invalid protocol" %
(rule_num, group_name))
@requires(load_groups)
def validate_ports(self):
#TODO: handle ICMP fromport
def port(x, default=-1):
try:
r = int(x)
if 1 <= r <= 65535:
return r
except ValueError:
pass
for group_name, group in has_rules(self.groups.iteritems()):
for rule_num, rule in enumerate(group['rules'], 1):
valid = True
if 'fromport' not in rule:
self.error("Rule %d in %s is missing a fromport" %
(rule_num, group_name))
valid = False
if 'toport' not in rule:
self.error("Rule %d in %s is missing a toport" %
(rule_num, group_name))
valid = False
if valid:
fromport = port(rule['fromport'])
toport = port(rule['toport'])
valid = True
if not fromport:
self.error("Rule %d in %s has an invalid fromport" %
(rule_num, group_name))
valid = False
if not toport:
self.error("Rule %d in %s has an invalid toport" %
(rule_num, group_name))
valid = False
if valid:
if fromport > toport:
self.error("Rule %d in %s has an invalid port range" %
(rule_num, group_name))
elif (toport - fromport) >= 100:
self.warn("Rule %d in %s has a large port range" %
(rule_num, group_name))
@requires(load_groups)
def validate_rule_fields(self):
for group_name, group in has_rules(self.groups.iteritems()):
for rule_num, rule in enumerate(group['rules'], 1):
for field in ('description',):
if field not in rule:
self.warn("Rule %d in %s is missing %s" %
(rule_num, group_name, inflect_a(field)))
@requires(load_groups)
def validate_group_fields(self):
for group_name, group in self.groups.iteritems():
for field in ('description', 'rules'):
if field not in group:
self.warn("%s is missing %s" % (group_name, inflect_a(field)))
@requires(load_boxes)
def validate_instance_fields(self):
for box_id, box in self.boxes.iteritems():
for field in ('ip', 'domain', 'groups'):
if field not in box:
self.warn("Box %s is missing %s" %
(box_id, inflect_a(field)))
@requires(load_groups)
def validate_locations(self):
valid_groups = set(self.groups.keys())
for group_name, group in has_rules(self.groups.iteritems()):
for rule_num, rule in enumerate(group['rules'], 1):
if 'location' in rule:
if rule['location'] not in valid_groups:
try:
ip = netaddr.IPNetwork(rule['location'])
if str(ip.cidr) != rule['location']:
self.warn("Location for rule %d in %s "
"will be interpreted as %s" %
(rule_num, group_name, ip.cidr))
except netaddr.AddrFormatError:
self.error("Rule %d in %s has an invalid location" %
(rule_num, group_name))
else:
self.error("Rule %d in %s is missing a location" %
(rule_num, group_name))
| ViaSat/hotcidr | HotCIDR/hotcidr/validation.py | Python | apache-2.0 | 8,940 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base abstract class for metadata builders."""
import abc
_ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()})
class MetadataBuilder(_ABC):
"""Abstract base class for metadata builders."""
@abc.abstractmethod
def get_metadata(self):
"""Returns the current metadata as a dictionary."""
@abc.abstractmethod
def get_metadata_protobuf(self):
"""Returns the current metadata as ExplanationMetadata protobuf"""
| googleapis/python-aiplatform | google/cloud/aiplatform/explain/metadata/metadata_builder.py | Python | apache-2.0 | 1,054 |
#!/usr/bin/env python
"""
Demonstrate use of pysnmp walks
"""
import sys
import re
from pysnmp.entity.rfc3413.oneliner import cmdgen
cmdGen = cmdgen.CommandGenerator()
devip = sys.argv.pop(1)
errorIndication, errorStatus, errorIndex, varBindTable = cmdGen.nextCmd(
cmdgen.CommunityData('server', 'galileo', 1),
cmdgen.UdpTransportTarget((devip, 161)),
cmdgen.MibVariable('IF-MIB', '').loadMibs(),
lexicographicMode=True, maxRows=150
)
if errorIndication:
print errorIndication
else:
if errorStatus:
print '%s at %s' % (
errorStatus.prettyPrint(),
errorIndex and varBindTable[-1][int(errorIndex)-1] or '?'
)
else:
ifdescr = []
inoctets = []
outoctets = []
for varBindTableRow in varBindTable:
for name, val in varBindTableRow:
np = name.prettyPrint()
vp = val.prettyPrint()
if re.search(r"ifDescr\.\d+", np):
ifdescr.append(vp)
continue
if re.search(r"ifInOctets\.\d+", np):
inoctets.append(vp)
continue
if re.search(r"ifOutOctets\.\d+", np):
outoctets.append(vp)
for l in zip(ifdescr, inoctets, outoctets):
print "%s\t%s\t%s" %(l[0], l[1], l[2])
| patrebert/pynet_cert | class2/walk2.py | Python | apache-2.0 | 1,368 |
#! /usr/bin/env python
# Very basic script template. Use this to build new
# examples for use in the api-kickstart repository
#
""" Copyright 2015 Akamai Technologies, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import requests, logging, json
from http_calls import EdgeGridHttpCaller
from random import randint
from akamai.edgegrid import EdgeGridAuth
from config import EdgeGridConfig
from urlparse import urljoin
import urllib
session = requests.Session()
debug = False
verbose = False
section_name = "cloudlet"
config = EdgeGridConfig({"verbose":debug},section_name)
if hasattr(config, "debug") and config.debug:
debug = True
if hasattr(config, "verbose") and config.verbose:
verbose = True
# Set the config options
session.auth = EdgeGridAuth(
client_token=config.client_token,
client_secret=config.client_secret,
access_token=config.access_token
)
# Set the baseurl based on config.host
baseurl = '%s://%s/' % ('https', config.host)
httpCaller = EdgeGridHttpCaller(session, debug, verbose, baseurl)
if __name__ == "__main__":
# Get the list of cloudlets to pick the one we want to use
endpoint_result = httpCaller.getResult("/cloudlets/api/v2/cloudlet-info")
# Result for edge redirector:
# {
# "location": "/cloudlets/api/v2/cloudlet-info/2",
# "cloudletId": 2,
# "cloudletCode": "SA",
# "apiVersion": "2.0",
# "cloudletName": "SAASACCESS"
#},
# Get the group ID for the cloudlet we're looking to create
endpoint_result = httpCaller.getResult("/cloudlets/api/v2/group-info")
# Result for group info:
# "groupName": "API Bootcamp",
# "location": "/cloudlets/api/v2/group-info/77649",
# "parentId": 64867,
# "capabilities": [
# {
# "cloudletId": 0,
# "cloudletCode": "ER",
# "capabilities": [
# "View",
# "Edit",
# "Activate",
# "Internal",
# "AdvancedEdit"
# ]
# },
sample_post_body = {
"cloudletId": 0,
"groupId": 77649,
"name": "APIBootcampERv1",
"description": "Testing the creation of a policy"
}
sample_post_result = httpCaller.postResult('/cloudlets/api/v2/policies', json.dumps(sample_post_body))
#{
#"cloudletCode": "SA",
#"cloudletId": 2,
#"name": "APIBootcampEdgeRedirect",
#"propertyName": null,
#"deleted": false,
#"lastModifiedDate": 1458765299155,
#"description": "Testing the creation of a policy",
#"apiVersion": "2.0",
#"lastModifiedBy": "advocate2",
#"serviceVersion": null,
#"createDate": 1458765299155,
#"location": "/cloudlets/api/v2/policies/11434",
#"createdBy": "advocate2",
#"activations": [
#{
#"serviceVersion": null,
#"policyInfo": {
#"status": "inactive",
#"name": "APIBootcampEdgeRedirect",
#"statusDetail": null,
#"detailCode": 0,
#"version": 0,
#"policyId": 11434,
#"activationDate": 0,
#"activatedBy": null
#},
#"network": "prod",
#"apiVersion": "2.0",
#"propertyInfo": null
#},
#{
#"serviceVersion": null,
#"policyInfo": {
#"status": "inactive",
#"name": "APIBootcampEdgeRedirect",
#"statusDetail": null,
#"detailCode": 0,
#"version": 0,
#"policyId": 11434,
#"activationDate": 0,
#"activatedBy": null
#},
#"network": "staging",
# "apiVersion": "2.0",
#"propertyInfo": null
#}
#],
# "groupId": 77649,
# "policyId": 11434 <<<<<<<<<<<
# }
# Activate by associating with a specific property
sample_post_url = "/cloudlets/api/v2/policies/11442/versions/1/activations"
sample_post_body = {
"network": "staging",
"additionalPropertyNames": [
"akamaiapibootcamp.com"
]
}
sample_post_result = httpCaller.postResult(sample_post_url, json.dumps(sample_post_body))
# Next, add the behavior for cloudlets
# PUT the update to activate the cloudlet
| dshafik/api-kickstart | examples/python/cloudlet_edge_redirector.py | Python | apache-2.0 | 4,551 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python sequence, tuple (including `namedtuple`), or dict that can contain
further sequences, tuples, and dicts.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python.platform import tf_logging as _tf_logging
from tensorflow.python.util.all_util import remove_undocumented
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, or
`collections.NamedDict`.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# For dictionaries with their values extracted, we always order the values
# by sorting the keys first (see note below). This code allows recreating
# e.g., `OrderedDict`s with their original key ordering.
result = dict(zip(sorted(_six.iterkeys(instance)), args))
return type(instance)((key, result[key]) for key in _six.iterkeys(instance))
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
if isinstance(iterable, dict):
# Iterate through dictionaries in a deterministic order. Note: we
# intentionally ignore the order in an `OrderedDict` because of the
# potential to introduce bugs if the user mixes ordered and plain dicts with
# the same keys. (This is based on experience.)
for key in sorted(_six.iterkeys(iterable)):
yield iterable[key]
else:
for value in iterable:
yield value
def _yield_flat_nest(nest):
for n in _yield_value(nest):
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
# Used by `_warn_once` to remember which warning messages have been given.
_ALREADY_WARNED = {}
def _warn_once(message):
"""Logs a warning message, once per unique string."""
if message not in _ALREADY_WARNED:
_ALREADY_WARNED[message] = True
_tf_logging.warning(message)
def is_sequence(seq):
"""Returns a true if its input is a collections.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
True if the sequence is a not a string and is a collections.Sequence or a
dict.
"""
if isinstance(seq, dict):
return True
if isinstance(seq, set):
_warn_once("Sets are not currently considered sequences, but this may "
"change in the future, so consider avoiding using them.")
return (isinstance(seq, _collections.Sequence)
and not isinstance(seq, _six.string_types))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
If `nest` is not a sequence, tuple, or dict, then returns a single-element
list: `[nest]`.
Args:
nest: an arbitrarily nested structure or a scalar object. Note, numpy
arrays are considered scalars.
Returns:
A Python list, the flattened version of the input.
"""
if is_sequence(nest):
return list(_yield_flat_nest(nest))
else:
return [nest]
def _recursive_assert_same_structure(nest1, nest2, check_types):
"""Helper function for `assert_same_structure`."""
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested structure.\n\n"
"First structure: %s\n\nSecond structure: %s." % (nest1, nest2))
if not is_sequence_nest1:
return # finished checking
if check_types:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
if isinstance(nest1, dict):
keys1 = set(_six.iterkeys(nest1))
keys2 = set(_six.iterkeys(nest2))
if keys1 != keys2:
raise ValueError(
"The two dictionaries don't have the same set of keys. First "
"structure has keys {}, while second structure has keys {}."
.format(keys1, keys2))
nest1_as_sequence = [n for n in _yield_value(nest1)]
nest2_as_sequence = [n for n in _yield_value(nest2)]
for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence):
_recursive_assert_same_structure(n1, n2, check_types)
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as
well, including the keys of dictionaries. If set to `False`, for example
a list and a tuple of objects will look the same if they have the same
size.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements.\n\nFirst structure (%i elements): %s\n\n"
"Second structure (%i elements): %s"
% (len_nest1, nest1, len_nest2, nest2))
_recursive_assert_same_structure(nest1, nest2, check_types)
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value have not the same structure, or if keys are
not unique.
"""
if not isinstance(dictionary, dict):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in _six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_sequence_as.
Args:
structure: Substructure (list / tuple / dict) to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: Nested structure, whose structure is given by nested lists,
tuples, and dicts. Note: numpy arrays and strings are considered
scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not is_sequence(flat_sequence):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered as scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flatten output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
_allowed_symbols = [
"assert_same_structure",
"is_sequence",
"flatten",
"flatten_dict_items",
"pack_sequence_as",
"map_structure",
"assert_shallow_structure",
"flatten_up_to",
"map_structure_up_to",
]
remove_undocumented(__name__, _allowed_symbols)
| tiagofrepereira2012/tensorflow | tensorflow/python/util/nest.py | Python | apache-2.0 | 21,628 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Tensorboard debugger data plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
from tensorflow.python.platform import test
from tensorflow.tensorboard.plugins.debugger import plugin as debugger_plugin
class FakeRequest(object):
"""A fake shell of a werkzeug request.
We fake instead of using a real request because the real request requires a
WSGI environment.
"""
def __init__(self, method, post_data):
"""Constructs a fake request, a simple version of a werkzeug request.
Args:
method: The uppercase method of the request, ie POST.
post_data: A dictionary of POST data.
"""
self.method = method
self.form = post_data
class DebuggerPluginTest(test.TestCase):
def setUp(self):
self.debugger_plugin = debugger_plugin.DebuggerPlugin()
self.unused_run_paths = {}
self.unused_logdir = '/logdir'
def testHealthPillsRouteProvided(self):
"""Tests that the plugin offers the route for requesting health pills."""
apps = self.debugger_plugin.get_plugin_apps(self.unused_run_paths,
self.unused_logdir)
self.assertIn('/health_pills', apps)
self.assertIsInstance(apps['/health_pills'], collections.Callable)
def testGetRequestsUnsupported(self):
"""Tests that GET requests are unsupported."""
request = FakeRequest('GET', {
'node_names': json.dumps(['layers/Matmul', 'logits/Add']),
})
self.assertEqual(
405,
self.debugger_plugin._serve_health_pills_helper(request).status_code)
def testRequestsWithoutProperPostKeyUnsupported(self):
"""Tests that requests lacking the node_names POST key are unsupported."""
request = FakeRequest('POST', {})
self.assertEqual(
400,
self.debugger_plugin._serve_health_pills_helper(request).status_code)
def testRequestsWithBadJsonUnsupported(self):
"""Tests that requests with undecodable JSON are unsupported."""
request = FakeRequest('POST',
{'node_names': 'some obviously non JSON text',})
self.assertEqual(
400,
self.debugger_plugin._serve_health_pills_helper(request).status_code)
def testRequestsWithNonListPostDataUnsupported(self):
"""Tests that requests with loads lacking lists of ops are unsupported."""
request = FakeRequest('POST', {
'node_names': json.dumps({
'this is a dict': 'and not a list.'
}),
})
self.assertEqual(
400,
self.debugger_plugin._serve_health_pills_helper(request).status_code)
if __name__ == '__main__':
test.main()
| manjunaths/tensorflow | tensorflow/tensorboard/plugins/debugger/plugin_test.py | Python | apache-2.0 | 3,406 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron import context as n_ctx
from sqlalchemy.orm import exc as orm_exc
from gbpservice.neutron.db.grouppolicy.extensions import group_proxy_db
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_extension_driver_api as test_ext_base)
class ExtensionDriverTestCaseMixin(object):
def test_proxy_group_extension(self):
l3p = self.create_l3_policy()['l3_policy']
self.assertEqual('192.168.0.0/16', l3p['proxy_ip_pool'])
self.assertEqual(28, l3p['proxy_subnet_prefix_length'])
l2p = self.create_l2_policy(l3_policy_id=l3p['id'])['l2_policy']
ptg = self.create_policy_target_group(
l2_policy_id=l2p['id'])['policy_target_group']
self.assertIsNone(ptg['proxy_group_id'])
self.assertIsNone(ptg['proxied_group_id'])
self.assertIsNone(ptg['proxy_type'])
# Verify Default L3P pool mapping on show
l3p = self.show_l3_policy(l3p['id'])['l3_policy']
self.assertEqual('192.168.0.0/16', l3p['proxy_ip_pool'])
self.assertEqual(28, l3p['proxy_subnet_prefix_length'])
ptg_proxy = self.create_policy_target_group(
proxied_group_id=ptg['id'])['policy_target_group']
self.assertIsNone(ptg_proxy['proxy_group_id'])
self.assertEqual(ptg['id'], ptg_proxy['proxied_group_id'])
self.assertEqual('l3', ptg_proxy['proxy_type'])
# Verify relationship added
ptg = self.show_policy_target_group(ptg['id'])['policy_target_group']
self.assertEqual(ptg_proxy['id'], ptg['proxy_group_id'])
self.assertIsNone(ptg['proxied_group_id'])
pt = self.create_policy_target(
policy_target_group_id=ptg_proxy['id'])['policy_target']
self.assertFalse(pt['proxy_gateway'])
self.assertFalse(pt['group_default_gateway'])
pt = self.create_policy_target(
policy_target_group_id=ptg_proxy['id'],
proxy_gateway=True, group_default_gateway=True)['policy_target']
self.assertTrue(pt['proxy_gateway'])
self.assertTrue(pt['group_default_gateway'])
pt = self.show_policy_target(pt['id'])['policy_target']
self.assertTrue(pt['proxy_gateway'])
self.assertTrue(pt['group_default_gateway'])
def test_preexisting_pt(self):
ptg = self.create_policy_target_group()['policy_target_group']
pt = self.create_policy_target(
policy_target_group_id=ptg['id'])['policy_target']
self.assertTrue('proxy_gateway' in pt)
self.assertTrue('group_default_gateway' in pt)
# Forcefully delete the entry in the proxy table, and verify that it's
# fixed by the subsequent GET
admin_context = n_ctx.get_admin_context()
mapping = admin_context.session.query(
group_proxy_db.ProxyGatewayMapping).filter_by(
policy_target_id=pt['id']).one()
admin_context.session.delete(mapping)
query = admin_context.session.query(
group_proxy_db.ProxyGatewayMapping).filter_by(
policy_target_id=pt['id'])
self.assertRaises(orm_exc.NoResultFound, query.one)
# Showing the object just ignores the extension
pt = self.show_policy_target(pt['id'],
expected_res_status=200)['policy_target']
self.assertFalse('proxy_gateway' in pt)
self.assertFalse('group_default_gateway' in pt)
# Updating the object just ignores the extension
pt = self.update_policy_target(
pt['id'], name='somenewname',
expected_res_status=200)['policy_target']
self.assertEqual('somenewname', pt['name'])
self.assertFalse('proxy_gateway' in pt)
self.assertFalse('group_default_gateway' in pt)
def test_proxy_group_multiple_proxies(self):
# same PTG proxied multiple times will fail
ptg = self.create_policy_target_group()['policy_target_group']
self.create_policy_target_group(proxied_group_id=ptg['id'])
# Second proxy will fail
res = self.create_policy_target_group(proxied_group_id=ptg['id'],
expected_res_status=400)
self.assertEqual('InvalidProxiedGroup', res['NeutronError']['type'])
def test_proxy_group_chain_proxy(self):
# Verify no error is raised when chaining multiple proxy PTGs
ptg0 = self.create_policy_target_group()['policy_target_group']
ptg1 = self.create_policy_target_group(
proxied_group_id=ptg0['id'],
expected_res_status=201)['policy_target_group']
self.create_policy_target_group(proxied_group_id=ptg1['id'],
expected_res_status=201)
def test_proxy_group_no_update(self):
ptg0 = self.create_policy_target_group()['policy_target_group']
ptg1 = self.create_policy_target_group()['policy_target_group']
ptg_proxy = self.create_policy_target_group(
proxied_group_id=ptg0['id'])['policy_target_group']
self.update_policy_target_group(
ptg_proxy['id'], proxied_group_id=ptg1['id'],
expected_res_status=400)
def test_different_proxy_type(self):
ptg = self.create_policy_target_group()['policy_target_group']
ptg_proxy = self.create_policy_target_group(
proxied_group_id=ptg['id'], proxy_type='l2')['policy_target_group']
self.assertEqual('l2', ptg_proxy['proxy_type'])
ptg_proxy = self.show_policy_target_group(
ptg_proxy['id'])['policy_target_group']
self.assertEqual('l2', ptg_proxy['proxy_type'])
def test_proxy_type_fails(self):
ptg = self.create_policy_target_group()['policy_target_group']
res = self.create_policy_target_group(proxy_type='l2',
expected_res_status=400)
self.assertEqual('ProxyTypeSetWithoutProxiedPTG',
res['NeutronError']['type'])
self.create_policy_target_group(proxied_group_id=ptg['id'],
proxy_type='notvalid',
expected_res_status=400)
def test_proxy_gateway_no_proxy(self):
ptg = self.create_policy_target_group()['policy_target_group']
res = self.create_policy_target(
policy_target_group_id=ptg['id'], proxy_gateway=True,
expected_res_status=400)
self.assertEqual('InvalidProxyGatewayGroup',
res['NeutronError']['type'])
def test_proxy_pool_invalid_prefix_length(self):
l3p = self.create_l3_policy(proxy_subnet_prefix_length=29)['l3_policy']
res = self.update_l3_policy(l3p['id'], proxy_subnet_prefix_length=32,
expected_res_status=400)
self.assertEqual('InvalidDefaultSubnetPrefixLength',
res['NeutronError']['type'])
# Verify change didn't persist
l3p = self.show_l3_policy(l3p['id'])['l3_policy']
self.assertEqual(29, l3p['proxy_subnet_prefix_length'])
# Verify it fails in creation
res = self.create_l3_policy(
proxy_subnet_prefix_length=32, expected_res_status=400)
self.assertEqual('InvalidDefaultSubnetPrefixLength',
res['NeutronError']['type'])
def test_proxy_pool_invalid_version(self):
# proxy_ip_pool is of a different version
res = self.create_l3_policy(ip_version=6, ip_pool='1::1/16',
proxy_ip_pool='192.168.0.0/16',
expected_res_status=400)
self.assertEqual('InvalidIpPoolVersion', res['NeutronError']['type'])
class ExtensionDriverTestCase(test_ext_base.ExtensionDriverTestBase,
ExtensionDriverTestCaseMixin):
_extension_drivers = ['proxy_group']
_extension_path = None
| jiahaoliang/group-based-policy | gbpservice/neutron/tests/unit/services/grouppolicy/test_group_proxy_extension.py | Python | apache-2.0 | 8,529 |
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pull in helpers that 'charms_openstack.plugins' will export
from charms_openstack.plugins.adapters import (
CephRelationAdapter,
)
from charms_openstack.plugins.classes import (
BaseOpenStackCephCharm,
CephCharm,
PolicydOverridePlugin,
)
from charms_openstack.plugins.trilio import (
TrilioVaultCharm,
TrilioVaultSubordinateCharm,
TrilioVaultCharmGhostAction,
)
__all__ = (
"BaseOpenStackCephCharm",
"CephCharm",
"CephRelationAdapter",
"PolicydOverridePlugin",
"TrilioVaultCharm",
"TrilioVaultSubordinateCharm",
"TrilioVaultCharmGhostAction",
)
| coreycb/charms.openstack | charms_openstack/plugins/__init__.py | Python | apache-2.0 | 1,179 |
# Copyright 2015, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pypowervm import exceptions as pvm_exc
from pypowervm.tasks import scsi_mapper as pvm_smap
from taskflow import task
from taskflow.types import failure as task_fail
from nova import exception
from nova.virt.powervm import media
from nova.virt.powervm import mgmt
LOG = logging.getLogger(__name__)
class CreateDiskForImg(task.Task):
"""The Task to create the disk from an image in the storage."""
def __init__(self, disk_dvr, context, instance, image_meta):
"""Create the Task.
Provides the 'disk_dev_info' for other tasks. Comes from the disk_dvr
create_disk_from_image method.
:param disk_dvr: The storage driver.
:param context: The context passed into the driver method.
:param instance: The nova instance.
:param nova.objects.ImageMeta image_meta:
The metadata of the image of the instance.
"""
super(CreateDiskForImg, self).__init__(
name='create_disk_from_img', provides='disk_dev_info')
self.disk_dvr = disk_dvr
self.instance = instance
self.context = context
self.image_meta = image_meta
def execute(self):
return self.disk_dvr.create_disk_from_image(
self.context, self.instance, self.image_meta)
def revert(self, result, flow_failures):
# If there is no result, or its a direct failure, then there isn't
# anything to delete.
if result is None or isinstance(result, task_fail.Failure):
return
# Run the delete. The result is a single disk. Wrap into list
# as the method works with plural disks.
try:
self.disk_dvr.delete_disks([result])
except pvm_exc.Error:
# Don't allow revert exceptions to interrupt the revert flow.
LOG.exception("Disk deletion failed during revert. Ignoring.",
instance=self.instance)
class AttachDisk(task.Task):
"""The task to attach the disk to the instance."""
def __init__(self, disk_dvr, instance, stg_ftsk):
"""Create the Task for the attach disk to instance method.
Requires disk info through requirement of disk_dev_info (provided by
crt_disk_from_img)
:param disk_dvr: The disk driver.
:param instance: The nova instance.
:param stg_ftsk: FeedTask to defer storage connectivity operations.
"""
super(AttachDisk, self).__init__(
name='attach_disk', requires=['disk_dev_info'])
self.disk_dvr = disk_dvr
self.instance = instance
self.stg_ftsk = stg_ftsk
def execute(self, disk_dev_info):
self.disk_dvr.attach_disk(self.instance, disk_dev_info, self.stg_ftsk)
def revert(self, disk_dev_info, result, flow_failures):
try:
self.disk_dvr.detach_disk(self.instance)
except pvm_exc.Error:
# Don't allow revert exceptions to interrupt the revert flow.
LOG.exception("Disk detach failed during revert. Ignoring.",
instance=self.instance)
class DetachDisk(task.Task):
"""The task to detach the disk storage from the instance."""
def __init__(self, disk_dvr, instance):
"""Creates the Task to detach the storage adapters.
Provides the stor_adpt_mappings. A list of pypowervm
VSCSIMappings or VFCMappings (depending on the storage adapter).
:param disk_dvr: The DiskAdapter for the VM.
:param instance: The nova instance.
"""
super(DetachDisk, self).__init__(
name='detach_disk', provides='stor_adpt_mappings')
self.instance = instance
self.disk_dvr = disk_dvr
def execute(self):
return self.disk_dvr.detach_disk(self.instance)
class DeleteDisk(task.Task):
"""The task to delete the backing storage."""
def __init__(self, disk_dvr):
"""Creates the Task to delete the disk storage from the system.
Requires the stor_adpt_mappings.
:param disk_dvr: The DiskAdapter for the VM.
"""
super(DeleteDisk, self).__init__(
name='delete_disk', requires=['stor_adpt_mappings'])
self.disk_dvr = disk_dvr
def execute(self, stor_adpt_mappings):
self.disk_dvr.delete_disks(stor_adpt_mappings)
class CreateAndConnectCfgDrive(task.Task):
"""The task to create the configuration drive."""
def __init__(self, adapter, instance, injected_files,
network_info, stg_ftsk, admin_pass=None):
"""Create the Task that creates and connects the config drive.
Requires the 'mgmt_cna'
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance
:param injected_files: A list of file paths that will be injected into
the ISO.
:param network_info: The network_info from the nova spawn method.
:param stg_ftsk: FeedTask to defer storage connectivity operations.
:param admin_pass (Optional, Default None): Password to inject for the
VM.
"""
super(CreateAndConnectCfgDrive, self).__init__(
name='cfg_drive', requires=['mgmt_cna'])
self.adapter = adapter
self.instance = instance
self.injected_files = injected_files
self.network_info = network_info
self.stg_ftsk = stg_ftsk
self.ad_pass = admin_pass
self.mb = None
def execute(self, mgmt_cna):
self.mb = media.ConfigDrivePowerVM(self.adapter)
self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
self.network_info, self.stg_ftsk,
admin_pass=self.ad_pass, mgmt_cna=mgmt_cna)
def revert(self, mgmt_cna, result, flow_failures):
# No media builder, nothing to do
if self.mb is None:
return
# Delete the virtual optical media. We don't care if it fails
try:
self.mb.dlt_vopt(self.instance, self.stg_ftsk)
except pvm_exc.Error:
LOG.exception('VOpt removal (as part of reversion) failed.',
instance=self.instance)
class DeleteVOpt(task.Task):
"""The task to delete the virtual optical."""
def __init__(self, adapter, instance, stg_ftsk=None):
"""Creates the Task to delete the instance's virtual optical media.
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance.
:param stg_ftsk: FeedTask to defer storage connectivity operations.
"""
super(DeleteVOpt, self).__init__(name='vopt_delete')
self.adapter = adapter
self.instance = instance
self.stg_ftsk = stg_ftsk
def execute(self):
media_builder = media.ConfigDrivePowerVM(self.adapter)
media_builder.dlt_vopt(self.instance, stg_ftsk=self.stg_ftsk)
class InstanceDiskToMgmt(task.Task):
"""The task to connect an instance's disk to the management partition."
This task will connect the instance's disk to the management partition and
discover it. We do these two pieces together because their reversion
happens in the same order.
"""
def __init__(self, disk_dvr, instance):
"""Create the Task for connecting boot disk to mgmt partition.
Provides:
stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
connected.
vios_wrap: The Virtual I/O Server wrapper from which the storage
element was mapped.
disk_path: The local path to the mapped-and-discovered device, e.g.
'/dev/sde'.
:param disk_dvr: The disk driver.
:param instance: The nova instance whose boot disk is to be connected.
"""
super(InstanceDiskToMgmt, self).__init__(
name='instance_disk_to_mgmt',
provides=['stg_elem', 'vios_wrap', 'disk_path'])
self.disk_dvr = disk_dvr
self.instance = instance
self.stg_elem = None
self.vios_wrap = None
self.disk_path = None
def execute(self):
"""Map the instance's boot disk and discover it."""
# Search for boot disk on the NovaLink partition.
if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
dev_name = self.disk_dvr.get_bootdisk_path(
self.instance, self.disk_dvr.mp_uuid)
if dev_name is not None:
return None, None, dev_name
self.stg_elem, self.vios_wrap = (
self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
new_maps = pvm_smap.find_maps(
self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
stg_elem=self.stg_elem)
if not new_maps:
raise exception.NewMgmtMappingNotFoundException(
stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)
# new_maps should be length 1, but even if it's not - i.e. we somehow
# matched more than one mapping of the same dev to the management
# partition from the same VIOS - it is safe to use the first one.
mapping = new_maps[0]
# Scan the SCSI bus, discover the disk, find its canonical path.
LOG.info("Discovering device and path for mapping of %(dev_name)s "
"on the management partition.",
{'dev_name': self.stg_elem.name}, instance=self.instance)
self.disk_path = mgmt.discover_vscsi_disk(mapping)
return self.stg_elem, self.vios_wrap, self.disk_path
def revert(self, result, flow_failures):
"""Unmap the disk and then remove it from the management partition.
We use this order to avoid rediscovering the device in case some other
thread scans the SCSI bus between when we remove and when we unmap.
"""
if self.vios_wrap is None or self.stg_elem is None:
# We never even got connected - nothing to do.
return
LOG.warning("Unmapping boot disk %(disk_name)s from the management "
"partition via Virtual I/O Server %(vioname)s.",
{'disk_name': self.stg_elem.name,
'vioname': self.vios_wrap.name}, instance=self.instance)
self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
self.stg_elem.name)
if self.disk_path is None:
# We did not discover the disk - nothing else to do.
return
LOG.warning("Removing disk %(dpath)s from the management partition.",
{'dpath': self.disk_path}, instance=self.instance)
try:
mgmt.remove_block_dev(self.disk_path)
except pvm_exc.Error:
# Don't allow revert exceptions to interrupt the revert flow.
LOG.exception("Remove disk failed during revert. Ignoring.",
instance=self.instance)
class RemoveInstanceDiskFromMgmt(task.Task):
"""Unmap and remove an instance's boot disk from the mgmt partition."""
def __init__(self, disk_dvr, instance):
"""Create task to unmap and remove an instance's boot disk from mgmt.
Requires (from InstanceDiskToMgmt):
stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
connected.
vios_wrap: The Virtual I/O Server wrapper.
(pypowervm.wrappers.virtual_io_server.VIOS) from which the
storage element was mapped.
disk_path: The local path to the mapped-and-discovered device, e.g.
'/dev/sde'.
:param disk_dvr: The disk driver.
:param instance: The nova instance whose boot disk is to be connected.
"""
self.disk_dvr = disk_dvr
self.instance = instance
super(RemoveInstanceDiskFromMgmt, self).__init__(
name='remove_inst_disk_from_mgmt',
requires=['stg_elem', 'vios_wrap', 'disk_path'])
def execute(self, stg_elem, vios_wrap, disk_path):
"""Unmap and remove an instance's boot disk from the mgmt partition.
Input parameters ('requires') provided by InstanceDiskToMgmt task.
:param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
to be disconnected.
:param vios_wrap: The Virtual I/O Server wrapper from which the
mapping is to be removed.
:param disk_path: The local path to the disk device to be removed, e.g.
'/dev/sde'
"""
# stg_elem is None if boot disk was not mapped to management partition.
if stg_elem is None:
return
LOG.info("Unmapping boot disk %(disk_name)s from the management "
"partition via Virtual I/O Server %(vios_name)s.",
{'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
instance=self.instance)
self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
LOG.info("Removing disk %(disk_path)s from the management partition.",
{'disk_path': disk_path}, instance=self.instance)
mgmt.remove_block_dev(disk_path)
| phenoxim/nova | nova/virt/powervm/tasks/storage.py | Python | apache-2.0 | 14,010 |
"""
Class representing an exponential distribution, allowing us to sample from it.
"""
from numpy.random import exponential
# Exponential draws
def exponential_draw(lambdax):
scale = 1.0 / lambdax
return exponential(scale=scale,size=None)
'''
# Do 1000 draws and plot them
import matplotlib.pyplot as plt
import numpy as np
scale = 2.
s = [exponential_draw(1./scale) for i in range(0,1000)]
s2 = np.random.exponential(scale, 1000)
count, bins, ignored = plt.hist(s, 50, normed=True)
count, bins, ignored = plt.hist(s2, 50, normed=True)
plt.show()
''' | ThomasBrouwer/BNMTF | code/models/distributions/exponential.py | Python | apache-2.0 | 569 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for an Oppia exploration."""
import datetime
from constants import constants
import core.storage.base_model.gae_models as base_models
import core.storage.user.gae_models as user_models
import feconf
from google.appengine.ext import ndb
class ExplorationSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for an exploration snapshot."""
pass
class ExplorationSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of an exploration snapshot."""
pass
class ExplorationModel(base_models.VersionedModel):
"""Versioned storage model for an Oppia exploration.
This class should only be imported by the exploration services file
and the exploration model test file.
"""
SNAPSHOT_METADATA_CLASS = ExplorationSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = ExplorationSnapshotContentModel
ALLOW_REVERT = True
# What this exploration is called.
title = ndb.StringProperty(required=True)
# The category this exploration belongs to.
category = ndb.StringProperty(required=True, indexed=True)
# The objective of this exploration.
objective = ndb.TextProperty(default='', indexed=False)
# The ISO 639-1 code for the language this exploration is written in.
language_code = ndb.StringProperty(
default=constants.DEFAULT_LANGUAGE_CODE, indexed=True)
# Tags (topics, skills, concepts, etc.) associated with this
# exploration.
tags = ndb.StringProperty(repeated=True, indexed=True)
# A blurb for this exploration.
blurb = ndb.TextProperty(default='', indexed=False)
# 'Author notes' for this exploration.
author_notes = ndb.TextProperty(default='', indexed=False)
# The version of the states blob schema.
states_schema_version = ndb.IntegerProperty(
required=True, default=0, indexed=True)
# The name of the initial state of this exploration.
init_state_name = ndb.StringProperty(required=True, indexed=False)
# A dict representing the states of this exploration. This dict should
# not be empty.
states = ndb.JsonProperty(default={}, indexed=False)
# The dict of parameter specifications associated with this exploration.
# Each specification is a dict whose keys are param names and whose values
# are each dicts with a single key, 'obj_type', whose value is a string.
param_specs = ndb.JsonProperty(default={}, indexed=False)
# The list of parameter changes to be performed once at the start of a
# reader's encounter with an exploration.
param_changes = ndb.JsonProperty(repeated=True, indexed=False)
# A boolean indicating whether automatic text-to-speech is enabled in
# this exploration.
auto_tts_enabled = ndb.BooleanProperty(default=True, indexed=True)
# A boolean indicating whether correctness feedback is enabled in this
# exploration.
correctness_feedback_enabled = ndb.BooleanProperty(
default=False, indexed=True)
# DEPRECATED in v2.0.0.rc.2. Do not use. Retaining it here because deletion
# caused GAE to raise an error on fetching a specific version of the
# exploration model.
# TODO(sll): Fix this error and remove this property.
skill_tags = ndb.StringProperty(repeated=True, indexed=True)
# DEPRECATED in v2.0.1. Do not use.
# TODO(sll): Remove this property from the model.
default_skin = ndb.StringProperty(default='conversation_v1')
# DEPRECATED in v2.5.4. Do not use.
skin_customizations = ndb.JsonProperty(indexed=False)
@classmethod
def get_exploration_count(cls):
"""Returns the total number of explorations."""
return cls.get_all().count()
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(ExplorationModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
exp_rights = ExplorationRightsModel.get_by_id(self.id)
# TODO(msl): test if put_async() leads to any problems (make
# sure summary dicts get updated correctly when explorations
# are changed).
exploration_commit_log = ExplorationCommitLogEntryModel.create(
self.id, self.version, committer_id, committer_username,
commit_type, commit_message, commit_cmds, exp_rights.status,
exp_rights.community_owned
)
exploration_commit_log.exploration_id = self.id
exploration_commit_log.put()
class ExplorationRightsSnapshotMetadataModel(
base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for an exploration rights snapshot."""
pass
class ExplorationRightsSnapshotContentModel(
base_models.BaseSnapshotContentModel):
"""Storage model for the content of an exploration rights snapshot."""
pass
class ExplorationRightsModel(base_models.VersionedModel):
"""Storage model for rights related to an exploration.
The id of each instance is the id of the corresponding exploration.
"""
SNAPSHOT_METADATA_CLASS = ExplorationRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = ExplorationRightsSnapshotContentModel
ALLOW_REVERT = False
# The user_ids of owners of this exploration.
owner_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to edit this exploration.
editor_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to voiceover this exploration.
voice_artist_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to view this exploration.
viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
# Whether this exploration is owned by the community.
community_owned = ndb.BooleanProperty(indexed=True, default=False)
# The exploration id which this exploration was cloned from. If None, this
# exploration was created from scratch.
cloned_from = ndb.StringProperty()
# For private explorations, whether this exploration can be viewed
# by anyone who has the URL. If the exploration is not private, this
# setting is ignored.
viewable_if_private = ndb.BooleanProperty(indexed=True, default=False)
# Time, in milliseconds, when the exploration was first published.
first_published_msec = ndb.FloatProperty(indexed=True, default=None)
# The publication status of this exploration.
status = ndb.StringProperty(
default=constants.ACTIVITY_STATUS_PRIVATE, indexed=True,
choices=[
constants.ACTIVITY_STATUS_PRIVATE,
constants.ACTIVITY_STATUS_PUBLIC
]
)
# DEPRECATED in v2.8.3. Do not use.
translator_ids = ndb.StringProperty(indexed=True, repeated=True)
def save(self, committer_id, commit_message, commit_cmds):
"""Saves a new version of the exploration, updating the Exploration
datastore model.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. The type of the command. A full list of command
types can be found in core/domain/exp_domain.py.
and then additional arguments for that command. For example:
{'cmd': 'AUTO_revert_version_number',
'version_number': 4}
"""
super(ExplorationRightsModel, self).commit(
committer_id, commit_message, commit_cmds)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(ExplorationRightsModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
# Create and delete events will already be recorded in the
# ExplorationModel.
if commit_type not in ['create', 'delete']:
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
# TODO(msl): test if put_async() leads to any problems (make
# sure summary dicts get updated correctly when explorations
# are changed).
ExplorationCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
username=committer_username,
exploration_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=self.status,
post_commit_community_owned=self.community_owned,
post_commit_is_private=(
self.status == constants.ACTIVITY_STATUS_PRIVATE)
).put_async()
class ExplorationCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to explorations.
A new instance of this model is created and saved every time a commit to
ExplorationModel or ExplorationRightsModel occurs.
The id for this model is of the form
'exploration-{{EXP_ID}}-{{EXP_VERSION}}'.
"""
# The id of the exploration being edited.
exploration_id = ndb.StringProperty(indexed=True, required=True)
@classmethod
def get_multi(cls, exp_id, exp_versions):
"""Gets the ExplorationCommitLogEntryModels for the given exploration
id and exploration versions.
Args:
exp_id: str. The id of the exploration.
exp_versions: list(int). The versions of the exploration.
Returns:
list(ExplorationCommitLogEntryModel). The list of
ExplorationCommitLogEntryModel instances which matches the given
exp_id and exp_versions.
"""
instance_ids = [cls._get_instance_id(exp_id, exp_version)
for exp_version in exp_versions]
return super(ExplorationCommitLogEntryModel, cls).get_multi(
instance_ids)
@classmethod
def _get_instance_id(cls, exp_id, exp_version):
"""Returns ID of the exploration commit log entry model.
Args:
exp_id: str. The exploration id whose states are mapped.
exp_version: int. The version of the exploration.
Returns:
str. A string containing exploration ID and
exploration version.
"""
return 'exploration-%s-%s' % (exp_id, exp_version)
@classmethod
def get_all_non_private_commits(
cls, page_size, urlsafe_start_cursor, max_age=None):
"""Fetches a list of all the non-private commits sorted by their
last updated attribute.
Args:
page_size: int. The maximum number of entities to be returned.
urlsafe_start_cursor: str or None. If provided, the list of
returned entities starts from this datastore cursor.
Otherwise, the returned entities start from the beginning
of the full list of entities.
max_age: datetime.timedelta. The maximum time duration within which
commits are needed.
Returns:
3-tuple of (results, cursor, more) which were created which were
created no earlier than max_age before the current time where:
results: List of query results.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this will
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
if not isinstance(max_age, datetime.timedelta) and max_age is not None:
raise ValueError(
'max_age must be a datetime.timedelta instance or None.')
query = cls.query(cls.post_commit_is_private == False) # pylint: disable=singleton-comparison
if max_age:
query = query.filter(
cls.last_updated >= datetime.datetime.utcnow() - max_age)
return cls._fetch_page_sorted_by_last_updated(
query, page_size, urlsafe_start_cursor)
class ExpSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia exploration.
This should be used whenever the content blob of the exploration is not
needed (e.g. in search results, etc).
A ExpSummaryModel instance stores the following information:
id, title, category, objective, language_code, tags,
last_updated, created_on, status (private, public),
community_owned, owner_ids, editor_ids,
viewer_ids, version.
The key of each instance is the exploration id.
"""
# What this exploration is called.
title = ndb.StringProperty(required=True)
# The category this exploration belongs to.
category = ndb.StringProperty(required=True, indexed=True)
# The objective of this exploration.
objective = ndb.TextProperty(required=True, indexed=False)
# The ISO 639-1 code for the language this exploration is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# Tags associated with this exploration.
tags = ndb.StringProperty(repeated=True, indexed=True)
# Aggregate user-assigned ratings of the exploration.
ratings = ndb.JsonProperty(default=None, indexed=False)
# Scaled average rating for the exploration.
scaled_average_rating = ndb.FloatProperty(indexed=True)
# Time when the exploration model was last updated (not to be
# confused with last_updated, which is the time when the
# exploration *summary* model was last updated).
exploration_model_last_updated = ndb.DateTimeProperty(indexed=True)
# Time when the exploration model was created (not to be confused
# with created_on, which is the time when the exploration *summary*
# model was created).
exploration_model_created_on = ndb.DateTimeProperty(indexed=True)
# Time when the exploration was first published.
first_published_msec = ndb.FloatProperty(indexed=True)
# The publication status of this exploration.
status = ndb.StringProperty(
default=constants.ACTIVITY_STATUS_PRIVATE, indexed=True,
choices=[
constants.ACTIVITY_STATUS_PRIVATE,
constants.ACTIVITY_STATUS_PUBLIC
]
)
# Whether this exploration is owned by the community.
community_owned = ndb.BooleanProperty(required=True, indexed=True)
# The user_ids of owners of this exploration.
owner_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to edit this exploration.
editor_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to voiceover this exploration.
voice_artist_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who are allowed to view this exploration.
viewer_ids = ndb.StringProperty(indexed=True, repeated=True)
# The user_ids of users who have contributed (humans who have made a
# positive (not just a revert) change to the exploration's content).
contributor_ids = ndb.StringProperty(indexed=True, repeated=True)
# A dict representing the contributors of non-trivial commits to this
# exploration. Each key of this dict is a user_id, and the corresponding
# value is the number of non-trivial commits that the user has made.
contributors_summary = ndb.JsonProperty(default={}, indexed=False)
# The version number of the exploration after this commit. Only populated
# for commits to an exploration (as opposed to its rights, etc.).
version = ndb.IntegerProperty()
# DEPRECATED in v2.8.3. Do not use.
translator_ids = ndb.StringProperty(indexed=True, repeated=True)
@classmethod
def get_non_private(cls):
"""Returns an iterable with non-private ExpSummary models.
Returns:
iterable. An iterable with non-private ExpSummary models.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status != constants.ACTIVITY_STATUS_PRIVATE
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_top_rated(cls, limit):
"""Fetches the top-rated exp summaries that are public in descending
order of scaled_average_rating.
Args:
limit: int. The maximum number of results to return.
Returns:
iterable. An iterable with the top rated exp summaries that are
public in descending order of scaled_average_rating.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status == constants.ACTIVITY_STATUS_PUBLIC
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).order(
-ExpSummaryModel.scaled_average_rating
).fetch(limit)
@classmethod
def get_private_at_least_viewable(cls, user_id):
"""Fetches private exp summaries that are at least viewable by the
given user.
Args:
user_id: The id of the given user.
Returns:
iterable. An iterable with private exp summaries that are at least
viewable by the given user.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status == constants.ACTIVITY_STATUS_PRIVATE
).filter(
ndb.OR(ExpSummaryModel.owner_ids == user_id,
ExpSummaryModel.editor_ids == user_id,
ExpSummaryModel.voice_artist_ids == user_id,
ExpSummaryModel.viewer_ids == user_id)
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_at_least_editable(cls, user_id):
"""Fetches exp summaries that are at least editable by the given user.
Args:
user_id: The id of the given user.
Returns:
iterable. An iterable with exp summaries that are at least
editable by the given user.
"""
return ExpSummaryModel.query().filter(
ndb.OR(ExpSummaryModel.owner_ids == user_id,
ExpSummaryModel.editor_ids == user_id)
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).fetch(feconf.DEFAULT_QUERY_LIMIT)
@classmethod
def get_recently_published(cls, limit):
"""Fetches exp summaries that are recently published.
Args:
limit: int. The maximum number of results to return.
Returns:
An iterable with exp summaries that are recently published. The
returned list is sorted by the time of publication with latest
being first in the list.
"""
return ExpSummaryModel.query().filter(
ExpSummaryModel.status == constants.ACTIVITY_STATUS_PUBLIC
).filter(
ExpSummaryModel.deleted == False # pylint: disable=singleton-comparison
).order(
-ExpSummaryModel.first_published_msec
).fetch(limit)
class StateIdMappingModel(base_models.BaseModel):
"""DEPRECATED: DO NOT USE.
State ID model for Oppia explorations.
This model maps each exploration version's state to a unique id.
Note: use the state id only for derived data, but not for data that’s
regarded as the source of truth, as the rules for assigning state id may
change in future.
The key of each instance is a combination of exploration id and version.
"""
# The exploration id whose states are mapped.
exploration_id = ndb.StringProperty(indexed=True, required=True)
# The version of the exploration.
exploration_version = ndb.IntegerProperty(indexed=True, required=True)
# A dict which maps each state name to a unique id.
state_names_to_ids = ndb.JsonProperty(required=True)
# Latest state id that has been assigned to any of the states in any of
# of the versions of given exploration. New state IDs should be assigned
# from this value + 1.
largest_state_id_used = ndb.IntegerProperty(indexed=True, required=True)
@classmethod
def create(
cls, exp_id, exp_version, state_names_to_ids,
largest_state_id_used, overwrite=False):
"""Creates a new instance of state id mapping model.
Args:
exp_id: str. The exploration id whose states are mapped.
exp_version: int. The version of that exploration.
state_names_to_ids: dict. A dict storing state name to ids mapping.
largest_state_id_used: int. The largest integer so far that has been
used as a state ID for this exploration.
overwrite: bool. Whether overwriting of an existing model should
be allowed.
Returns:
StateIdMappingModel. Instance of the state id mapping model.
"""
instance_id = cls._generate_instance_id(exp_id, exp_version)
if not overwrite and cls.get_by_id(instance_id):
raise Exception(
'State id mapping model already exists for exploration %s,'
' version %d' % (exp_id, exp_version))
model = cls(
id=instance_id, exploration_id=exp_id,
exploration_version=exp_version,
state_names_to_ids=state_names_to_ids,
largest_state_id_used=largest_state_id_used)
model.put()
return model
@classmethod
def _generate_instance_id(cls, exp_id, exp_version):
"""Generates ID of the state id mapping model instance.
Args:
exp_id: str. The exploration id whose states are mapped.
exp_version: int. The version of the exploration.
Returns:
str. A string containing exploration ID and
exploration version.
"""
return '%s.%d' % (exp_id, exp_version)
@classmethod
def get_state_id_mapping_model(cls, exp_id, exp_version):
"""Retrieve state id mapping model from the datastore.
Args:
exp_id: str. The exploration id.
exp_version: int. The exploration version.
Returns:
StateIdMappingModel. The model retrieved from the datastore.
"""
instance_id = cls._generate_instance_id(exp_id, exp_version)
instance = cls.get(instance_id)
return instance
@classmethod
def delete_state_id_mapping_models(cls, exp_id, exp_versions):
"""Removes state id mapping models present in state_id_mapping_models.
Args:
exp_id: str. The id of the exploration.
exp_versions: list(int). A list of exploration versions for which
the state id mapping model is to be deleted.
"""
keys = [
ndb.Key(cls, cls._generate_instance_id(exp_id, exp_version))
for exp_version in exp_versions]
ndb.delete_multi(keys)
| souravbadami/oppia | core/storage/exploration/gae_models.py | Python | apache-2.0 | 26,280 |
# -*- coding: utf-8 -*-
'''
The function cache system allows for data to be stored on the master so it can be easily read by other minions
'''
# Import python libs
import copy
import logging
# Import salt libs
import salt.crypt
import salt.payload
log = logging.getLogger(__name__)
def _auth():
'''
Return the auth object
'''
if 'auth' not in __context__:
__context__['auth'] = salt.crypt.SAuth(__opts__)
return __context__['auth']
def update(clear=False):
'''
Execute the configured functions and send the data back up to the master
The functions to be executed are merged from the master config, pillar and
minion config under the option "function_cache":
.. code-block:: yaml
mine_functions:
network.ip_addrs:
- eth0
disk.usage: []
The function cache will be populated with information from executing these
functions
CLI Example:
.. code-block:: bash
salt '*' mine.update
'''
m_data = __salt__['config.option']('mine_functions', {})
data = {}
for func in m_data:
if func not in __salt__:
log.error('Function {0} in mine_functions not available'
.format(func))
continue
try:
if m_data[func] and isinstance(m_data[func], dict):
data[func] = __salt__[func](**m_data[func])
elif m_data[func] and isinstance(m_data[func], list):
data[func] = __salt__[func](*m_data[func])
else:
data[func] = __salt__[func]()
except Exception:
log.error('Function {0} in mine_functions failed to execute'
.format(func))
continue
if __opts__['file_client'] == 'local':
if not clear:
old = __salt__['data.getval']('mine_cache')
if isinstance(old, dict):
old.update(data)
data = old
return __salt__['data.update']('mine_cache', data)
auth = _auth()
load = {
'cmd': '_mine',
'data': data,
'id': __opts__['id'],
'clear': clear,
'tok': auth.gen_token('salt'),
}
sreq = salt.payload.SREQ(__opts__['master_uri'])
ret = sreq.send('aes', auth.crypticle.dumps(load))
return auth.crypticle.loads(ret)
def send(func, *args, **kwargs):
'''
Send a specific function to the mine.
CLI Example:
.. code-block:: bash
salt '*' mine.send network.interfaces eth0
'''
if not func in __salt__:
return False
data = {}
arg_data = salt.utils.arg_lookup(__salt__[func])
func_data = copy.deepcopy(kwargs)
for ind, _ in enumerate(arg_data.get('args', [])):
try:
func_data[arg_data['args'][ind]] = args[ind]
except IndexError:
# Safe error, arg may be in kwargs
pass
f_call = salt.utils.format_call(__salt__[func], func_data)
try:
if 'kwargs' in f_call:
data[func] = __salt__[func](*f_call['args'], **f_call['kwargs'])
else:
data[func] = __salt__[func](*f_call['args'])
except Exception as exc:
log.error('Function {0} in mine.send failed to execute: {1}'
.format(func, exc))
return False
if __opts__['file_client'] == 'local':
old = __salt__['data.getval']('mine_cache')
if isinstance(old, dict):
old.update(data)
data = old
return __salt__['data.update']('mine_cache', data)
auth = _auth()
load = {
'cmd': '_mine',
'data': data,
'id': __opts__['id'],
'tok': auth.gen_token('salt'),
}
sreq = salt.payload.SREQ(__opts__['master_uri'])
ret = sreq.send('aes', auth.crypticle.dumps(load))
return auth.crypticle.loads(ret)
def get(tgt, fun, expr_form='glob'):
'''
Get data from the mine based on the target, function and expr_form
Targets can be matched based on any standard matching system that can be
matched on the master via these keywords::
glob
pcre
grain
grain_pcre
CLI Example:
.. code-block:: bash
salt '*' mine.get '*' network.interfaces
salt '*' mine.get 'os:Fedora' network.interfaces grain
'''
if expr_form.lower == 'pillar':
log.error('Pillar matching not supported on mine.get')
return ''
if __opts__['file_client'] == 'local':
ret = {}
is_target = {'glob': __salt__['match.glob'],
'pcre': __salt__['match.pcre'],
'list': __salt__['match.list'],
'grain': __salt__['match.grain'],
'grain_pcre': __salt__['match.grain_pcre'],
'compound': __salt__['match.compound'],
'ipcidr': __salt__['match.ipcidr'],
}[expr_form](tgt)
if is_target:
data = __salt__['data.getval']('mine_cache')
if isinstance(data, dict) and fun in data:
ret[__opts__['id']] = data[fun]
return ret
auth = _auth()
load = {
'cmd': '_mine_get',
'id': __opts__['id'],
'tgt': tgt,
'fun': fun,
'expr_form': expr_form,
'tok': auth.gen_token('salt'),
}
sreq = salt.payload.SREQ(__opts__['master_uri'])
ret = sreq.send('aes', auth.crypticle.dumps(load))
return auth.crypticle.loads(ret)
def delete(fun):
'''
Remove specific function contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.delete 'network.interfaces'
'''
if __opts__['file_client'] == 'local':
data = __salt__['data.getval']('mine_cache')
if isinstance(data, dict) and fun in data:
del data[fun]
return __salt__['data.update']('mine_cache', data)
auth = _auth()
load = {
'cmd': '_mine_delete',
'id': __opts__['id'],
'fun': fun,
'tok': auth.gen_token('salt'),
}
sreq = salt.payload.SREQ(__opts__['master_uri'])
ret = sreq.send('aes', auth.crypticle.dumps(load))
return auth.crypticle.loads(ret)
def flush():
'''
Remove all mine contents of minion. Returns True on success.
CLI Example:
.. code-block:: bash
salt '*' mine.flush
'''
if __opts__['file_client'] == 'local':
return __salt__['data.update']('mine_cache', {})
auth = _auth()
load = {
'cmd': '_mine_flush',
'id': __opts__['id'],
'tok': auth.gen_token('salt'),
}
sreq = salt.payload.SREQ(__opts__['master_uri'])
ret = sreq.send('aes', auth.crypticle.dumps(load))
return auth.crypticle.loads(ret)
| victorywang80/Maintenance | saltstack/src/salt/modules/mine.py | Python | apache-2.0 | 6,869 |
from setuptools import find_packages
from os import path, environ
import io
import os
import re
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
# pip's single-source version method as described here:
# https://python-packaging-user-guide.readthedocs.io/single_source_version/
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# fetch include and library directories
inc_dirs = [np.get_include(), '/usr/local/include/librealsense']
lib_dirs = ['/usr/local/lib']
# windows environment variables
if 'PYRS_INCLUDES' in environ:
inc_dirs.append(environ['PYRS_INCLUDES'])
if 'PYRS_LIBS' in environ:
lib_dirs.append(environ['PYRS_LIBS'])
# cython extension, dont build if docs
on_rtd = environ.get('READTHEDOCS') == 'True'
if on_rtd:
module = []
else:
module = cythonize(
[Extension(
name='pyrealsense.rsutilwrapper',
sources=["pyrealsense/rsutilwrapper.pyx", "pyrealsense/rsutilwrapperc.cpp"],
libraries=['realsense'],
include_dirs=inc_dirs,
library_dirs=lib_dirs,
language="c++",)])
# create long description from readme for pypi
here = path.abspath(path.dirname(__file__))
with io.open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(name='pyrealsense',
version=find_version('pyrealsense', '__init__.py'),
description='Cross-platform ctypes/Cython wrapper to the librealsense library.',
long_description=long_description,
author='Antoine Loriette',
author_email='[email protected]',
url='https://github.com/toinsson/pyrealsense',
license='Apache',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
# 'License :: OSem :: Hardware',
],
keywords='realsense',
packages=find_packages(),
ext_modules=module,
setup_requires=['numpy', 'cython'],
install_requires=['numpy', 'cython', 'pycparser', 'six'])
| toinsson/pyrealsense | setup.py | Python | apache-2.0 | 2,524 |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from mock import patch
from nose.tools import assert_equals, assert_raises
import pygenie
from ..utils import FakeRunningJob
assert_equals.__self__.maxDiff = None
@pygenie.adapter.genie_3.set_jobname
def set_jobname(job):
return dict()
@patch.dict('os.environ', {'GENIE_BYPASS_HOME_CONFIG': '1'})
class TestingGenieJob(unittest.TestCase):
"""Test GenieJob."""
def test_default_command_tag(self):
"""Test GenieJob default command tags."""
job = pygenie.jobs.GenieJob()
assert_equals(
job.get('default_command_tags'),
[u'type:genie']
)
def test_cmd_args_explicit(self):
"""Test GenieJob explicit cmd args."""
job = pygenie.jobs.GenieJob() \
.command_arguments('explicitly stating command args')
assert_equals(
job.cmd_args,
u'explicitly stating command args'
)
def test_cmd_args_constructed(self):
"""Test GenieJob constructed cmd args."""
with assert_raises(pygenie.exceptions.GenieJobError) as cm:
pygenie.jobs.GenieJob().cmd_args
@patch.dict('os.environ', {'GENIE_BYPASS_HOME_CONFIG': '1'})
class TestingGenieJobRepr(unittest.TestCase):
"""Test GenieJob repr."""
@patch('pygenie.jobs.core.is_file')
def test_repr(self, is_file):
"""Test GenieJob repr."""
is_file.return_value = True
job = pygenie.jobs.GenieJob() \
.applications('app1') \
.applications('app2') \
.archive(False) \
.cluster_tags('cluster1') \
.cluster_tags('cluster2') \
.command_arguments('genie job repr args') \
.command_tags('cmd1') \
.command_tags('cmd2') \
.dependencies('/dep1') \
.dependencies('/dep2') \
.description('description') \
.disable_archive() \
.genie_email('[email protected]') \
.genie_setup_file('/setup.sh') \
.genie_timeout(999) \
.genie_url('http://asdfasdf') \
.genie_username('jsmith') \
.group('group1') \
.job_id('geniejob_repr') \
.job_name('geniejob_repr') \
.job_version('1.1.1') \
.parameter('param1', 'pval1') \
.parameter('param2', 'pval2') \
.parameters(param3='pval3', param4='pval4') \
.post_cmd_args('post1') \
.post_cmd_args(['post2', 'post3']) \
.tags('tag1') \
.tags('tag2')
assert_equals(
str(job),
'.'.join([
'GenieJob()',
'applications("app1")',
'applications("app2")',
'archive(False)',
'cluster_tags("cluster1")',
'cluster_tags("cluster2")',
'command_arguments("genie job repr args")',
'command_tags("cmd1")',
'command_tags("cmd2")',
'dependencies("/dep1")',
'dependencies("/dep2")',
'description("description")',
'genie_email("[email protected]")',
'genie_setup_file("/setup.sh")',
'genie_timeout(999)',
'genie_url("http://asdfasdf")',
'genie_username("jsmith")',
'group("group1")',
'job_id("geniejob_repr")',
'job_name("geniejob_repr")',
'job_version("1.1.1")',
'parameter("param1", "pval1")',
'parameter("param2", "pval2")',
'parameter("param3", "pval3")',
'parameter("param4", "pval4")',
'post_cmd_args("post1")',
"post_cmd_args([u'post2', u'post3'])",
'tags("tag1")',
'tags("tag2")'
])
)
def test_genie_cpu(self):
"""Test GenieJob repr (genie_cpu)."""
job = pygenie.jobs.GenieJob() \
.job_id('123') \
.genie_username('user') \
.genie_cpu(12)
assert_equals(
'.'.join([
'GenieJob()',
'genie_cpu(12)',
'genie_username("user")',
'job_id("123")'
]),
str(job)
)
def test_genie_memory(self):
"""Test GenieJob repr (genie_memory)."""
job = pygenie.jobs.GenieJob() \
.job_id('123') \
.genie_username('user') \
.genie_memory(7000)
assert_equals(
'.'.join([
'GenieJob()',
'genie_memory(7000)',
'genie_username("user")',
'job_id("123")'
]),
str(job)
)
@patch.dict('os.environ', {'GENIE_BYPASS_HOME_CONFIG': '1'})
class TestingGenieJobAdapters(unittest.TestCase):
"""Test adapting GenieJob to different clients."""
def setUp(self):
self.dirname = os.path.dirname(os.path.realpath(__file__))
def test_genie3_payload(self):
"""Test GenieJob payload for Genie 3."""
with patch.dict('os.environ', {'GENIE_BYPASS_HOME_CONFIG': '1'}):
genie3_conf = pygenie.conf.GenieConf() \
.load_config_file(os.path.join(self.dirname, 'genie3.ini'))
job = pygenie.jobs.GenieJob(genie3_conf) \
.applications(['applicationid1']) \
.cluster_tags('type:cluster1') \
.command_arguments('command args for geniejob') \
.command_tags('type:geniecmd') \
.dependencies(['/file1', '/file2']) \
.description('this job is to test geniejob adapter') \
.archive(False) \
.genie_cpu(3) \
.genie_email('[email protected]') \
.genie_memory(999) \
.genie_timeout(100) \
.genie_url('http://fdsafdsa') \
.genie_username('jdoe') \
.group('geniegroup1') \
.job_id('geniejob1') \
.job_name('testing_adapting_geniejob') \
.tags('tag1, tag2') \
.job_version('0.0.1alpha')
assert_equals(
pygenie.adapter.genie_3.get_payload(job),
{
'applications': ['applicationid1'],
'attachments': [],
'clusterCriterias': [
{'tags': ['type:cluster1']},
{'tags': ['type:genie']},
],
'commandArgs': 'command args for geniejob',
'commandCriteria': ['type:geniecmd'],
'cpu': 3,
'dependencies': ['/file1', '/file2'],
'description': 'this job is to test geniejob adapter',
'disableLogArchival': True,
'email': '[email protected]',
'group': 'geniegroup1',
'id': 'geniejob1',
'memory': 999,
'name': 'testing_adapting_geniejob',
'setupFile': None,
'tags': ['tag1', 'tag2'],
'timeout': 100,
'user': 'jdoe',
'version': '0.0.1alpha'
}
)
@patch.dict('os.environ', {'GENIE_BYPASS_HOME_CONFIG': '1'})
class TestingJobExecute(unittest.TestCase):
"""Test executing job."""
@patch('pygenie.jobs.core.reattach_job')
@patch('pygenie.jobs.core.generate_job_id')
@patch('pygenie.jobs.core.execute_job')
def test_job_execute(self, exec_job, gen_job_id, reattach_job):
"""Testing job execution."""
job = pygenie.jobs.HiveJob() \
.job_id('exec') \
.genie_username('exectester') \
.script('select * from db.table')
job.execute()
gen_job_id.assert_not_called()
reattach_job.assert_not_called()
exec_job.assert_called_once_with(job)
@patch('pygenie.jobs.core.reattach_job')
@patch('pygenie.jobs.core.generate_job_id')
@patch('pygenie.jobs.core.execute_job')
def test_job_execute_retry(self, exec_job, gen_job_id, reattach_job):
"""Testing job execution with retry."""
job_id = 'exec-retry'
new_job_id = '{}-5'.format(job_id)
gen_job_id.return_value = new_job_id
reattach_job.side_effect = pygenie.exceptions.GenieJobNotFoundError
job = pygenie.jobs.HiveJob() \
.job_id(job_id) \
.genie_username('exectester') \
.script('select * from db.table')
job.execute(retry=True)
gen_job_id.assert_called_once_with(job_id,
return_success=True,
conf=job._conf)
reattach_job.assert_called_once_with(new_job_id, conf=job._conf)
exec_job.assert_called_once_with(job)
assert_equals(new_job_id, job._job_id)
@patch('pygenie.jobs.core.reattach_job')
@patch('pygenie.jobs.core.generate_job_id')
@patch('pygenie.jobs.core.execute_job')
def test_job_execute_retry_force(self, exec_job, gen_job_id, reattach_job):
"""Testing job execution with force retry."""
job_id = 'exec-retry-force'
new_job_id = '{}-8'.format(job_id)
gen_job_id.return_value = new_job_id
reattach_job.side_effect = pygenie.exceptions.GenieJobNotFoundError
job = pygenie.jobs.HiveJob() \
.job_id(job_id) \
.genie_username('exectester') \
.script('select * from db.table')
job.execute(retry=True, force=True)
gen_job_id.assert_called_once_with(job_id,
return_success=False,
conf=job._conf)
reattach_job.assert_called_once_with(new_job_id, conf=job._conf)
exec_job.assert_called_once_with(job)
assert_equals(new_job_id, job._job_id)
@patch.dict('os.environ', {'GENIE_BYPASS_HOME_CONFIG': '1'})
class TestingSetJobName(unittest.TestCase):
"""Test setting job name from script."""
def test_set_job_name(self):
"""Test setting job name from script contents."""
assert_equals(
{'name': 'SELECT * FROM db.table'},
set_jobname(pygenie.jobs.PrestoJob() \
.script('SELECT * FROM db.table'))
)
def test_set_job_name_truncate(self):
"""Test setting job name from script contents (with truncate)."""
job_name = set_jobname(
pygenie.jobs.PrestoJob()\
.script(''.join([str(i) for i in range(100)]))
).get('name') or ''
assert_equals(
40,
len(job_name)
)
def test_set_job_name_newline(self):
"""Test setting job name from script contents (with newline)."""
assert_equals(
{'name': 'SELECT * FROM db.table'},
set_jobname(pygenie.jobs.PrestoJob() \
.script("SELECT\n*\nFROM\ndb.table"))
)
def test_set_job_name_parameter(self):
"""Test setting job name from script contents (with parameter)."""
assert_equals(
{'name': 'SELECT * FROM db.{table}'},
set_jobname(pygenie.jobs.PrestoJob() \
.script("SELECT * FROM db.${table}"))
)
def test_set_job_name_semicolon(self):
"""Test setting job name from script contents (with semicolon)."""
assert_equals(
{'name': 'SELECT * FROM db.table'},
set_jobname(pygenie.jobs.PrestoJob() \
.script("SELECT * FROM db.table;"))
)
def test_set_job_name_quotes(self):
"""Test setting job name from script contents (with quotes)."""
assert_equals(
{'name': 'min(values) r = foo order by date, hour'},
set_jobname(pygenie.jobs.PrestoJob() \
.script("min(\"values\") r = 'foo' order by date, hour;"))
)
| ajoymajumdar/genie | genie-client/src/main/python/tests/job_tests/test_geniejob.py | Python | apache-2.0 | 12,048 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotations."""
import tvm
from tvm import relay
import pytest
def test_on_device_via_string():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda")
assert isinstance(call, relay.Call)
assert len(call.args) == 1
assert call.args[0] == x
assert call.attrs.virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.virtual_device.virtual_device_id == 0
assert call.attrs.virtual_device.target is None
assert call.attrs.virtual_device.memory_scope == ""
assert call.attrs.constrain_body
assert not call.attrs.constrain_result
def test_on_device_via_device():
x = relay.Var("x")
call = relay.annotation.on_device(x, tvm.device("cpu"))
assert call.attrs.virtual_device.device_type_int == 1 # ie kDLCPU
def test_on_device_invalid_device():
x = relay.Var("x")
pytest.raises(ValueError, lambda: relay.annotation.on_device(x, "bogus"))
def test_on_device_fixed():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda", constrain_result=True)
assert call.attrs.virtual_device.device_type_int == 2 # ie kDLCUDA
assert call.attrs.constrain_body
assert call.attrs.constrain_result
def test_on_device_free():
x = relay.Var("x")
call = relay.annotation.on_device(x, "cuda", constrain_result=False, constrain_body=False)
assert call.attrs.virtual_device.device_type_int == -1 # ie kInvalidDeviceType
assert not call.attrs.constrain_body
assert not call.attrs.constrain_result
def test_function_on_device():
x = relay.Var("x")
y = relay.Var("y")
f = relay.Function([x, y], relay.add(x, y))
func = relay.annotation.function_on_device(f, ["cpu", "cuda"], "cuda")
assert isinstance(func, relay.Function)
assert len(func.attrs["param_virtual_devices"]) == 2
assert func.attrs["param_virtual_devices"][0].device_type_int == 1 # ie kDLCPU
assert func.attrs["param_virtual_devices"][1].device_type_int == 2 # ie kDLCUDA
assert func.virtual_device_.device_type_int == 2 # ie KDLCUDA
if __name__ == "__main__":
import sys
sys.exit(pytest.main([__file__] + sys.argv[1:]))
| dmlc/tvm | tests/python/relay/op/annotation/test_annotation.py | Python | apache-2.0 | 2,947 |
#
# SNMPv1 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1157.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import univ, namedtype, namedval, tag
from pyasn1_modules import rfc1155
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('version-1', 0)
)
defaultValue = 0
class Community(univ.OctetString): pass
class RequestID(univ.Integer): pass
class ErrorStatus(univ.Integer):
namedValues = namedval.NamedValues(
('noError', 0),
('tooBig', 1),
('noSuchName', 2),
('badValue', 3),
('readOnly', 4),
('genErr', 5)
)
class ErrorIndex(univ.Integer): pass
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1155.ObjectName()),
namedtype.NamedType('value', rfc1155.ObjectSyntax())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
class _RequestBase(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', RequestID()),
namedtype.NamedType('error-status', ErrorStatus()),
namedtype.NamedType('error-index', ErrorIndex()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class GetResponsePDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class TrapPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
namedtype.NamedType('generic-trap', univ.Integer().clone(
namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
('authenticationFailure', 4), ('egpNeighborLoss', 5),
('enterpriseSpecific', 6)))),
namedtype.NamedType('specific-trap', univ.Integer()),
namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class Pdus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-response', GetResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('trap', TrapPDU())
)
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('community', Community()),
namedtype.NamedType('data', Pdus())
)
| itielshwartz/BackendApi | lib/pyasn1_modules/rfc1157.py | Python | apache-2.0 | 3,309 |
import webapp2
from google.appengine.ext import db
import logging
import charbuilder
import traits
import traceback
import random
import string
instance_key = "".join(
(random.choice(string.ascii_uppercase + string.digits) for i in xrange(25)))
def getFile(_file):
with open(_file, "r") as f: return f.read().replace("\n", "")
HTML = {"main_page": getFile("main_page.html"),
"creation_page": getFile("creation_page.html")}
def mergeDicts(master_dict):
new_dict = []
for dictionary in master_dict.keys():
if not dictionary:
continue
new_dict.extend(master_dict[dictionary].items())
return dict(new_dict)
class Parameters(db.Model):
parameters = db.StringProperty()
order = db.DateTimeProperty(auto_now=True)
instance_key = db.StringProperty()
class MainPage(webapp2.RequestHandler):
fields = {"cat_checkboxes": "",
"spell_checkboxes": ""}
def get(self):
"""
"""
self.response.headers['Content-Type'] = 'text/html' # tells the page to load as html instead of plain text
try:
self.configureCatBoxes()
self.configureSpellCollegeCheckboxes()
self.response.write(HTML["main_page"] % self.fields) # renders the main_page.html contents
except Exception:
self.response.write(traceback.format_exc()) # if there was an error, write that instead of the main_page
def configureSpellCollegeCheckboxes(self):
spell_colleges = {"MC": "Mind Control",
"Meta": "Meta",
"L/D": "Light & Darkness",
"Move.": "Movement",
"BC": "Body Control",
"Fire": "Fire",
"P/W": "Protection & Warning",
"Air": "Air",
"Water": "Water",
"Ench.": "Enchantment",
"C/E": "Communication & Emptahy",
"Healing": "Healing",
"Know.": "Knowledge",
"Earth": "Earth",
"Gate": "Gate",
"Necro.": "Necromantic"}
checkbox_html = '<input type="checkbox" name="spell_colleges" value="%s"> %s'
column = 0
complete_html = "<table>"
for cat in sorted(spell_colleges.keys()):
if column > 5:
column = 0
if column == 0:
complete_html += "<tr>" # starts a new table row
y = checkbox_html % (cat, spell_colleges[cat]) # this puts whatever the current category is as the value and text to display
complete_html += "<td> %s </td>" % (y) # puts the entire line as column with the td tag
column += 1 # go to the next column
complete_html += "</table>" # close the table
self.fields["spell_checkboxes"] = complete_html
def configureCatBoxes(self):
psionic_powers = ["Antipsi", "Esp", "Psychic Healing",
"Psychokinesis", "Teleportation", "Telepathy"]
power_cats = []
checkbox_html = '<input type="checkbox" name="cat_type" value="%s"> %s'
column = 0
complete_html = "<table>"
for cat in sorted(traits.traits.SKILL_CATEGORIES):
if cat in psionic_powers:
power_cats.append(cat)
continue
if column > 5:
column = 0
if column == 0:
complete_html += "<tr>" # starts a new table row
y = checkbox_html % (cat, cat) # this puts whatever the current category is as the value and text to display
complete_html += "<td> %s </td>" % (y) # puts the entire line as column with the td tag
column += 1 # go to the next column
complete_html += "</table>"
complete_html += "<br><b>Psionic Powers</b><br>"
complete_html += "<table>"
column = 0
for cat in power_cats:
if column > 5:
column = 0
if column == 0:
complete_html += "<tr>" # starts a new table row
y = checkbox_html % (cat, cat) # this puts whatever the current category is as the value and text to display
complete_html += "<td> %s </td>" % (y) # puts the entire line as column with the td tag
column += 1
complete_html += "</table>"
self.fields["cat_checkboxes"] = complete_html
def post(self):
"""
"""
self.response.headers['Content-Type'] = 'text/html'
try:
try:
fd = self.getRequests()
logging.info(fd)
self.saveParameters(fd)
except ValueError:
fd = self.getParameters()
new_character = charbuilder.CharacterBuilder(fd)
# Write the generated character to the page after formatting
nc = mergeDicts(new_character.__dict__)
self.response.write(HTML["creation_page"] % (nc))
except:
self.response.write(traceback.format_exc())
def getRequests(self):
"""Returns all form data from current set parameters.
"""
return {"points": int(self.request.get("points")),
"tl": int(self.request.get("Tech Level")),
"adv_types": self.request.get_all("adv_type"),
"disadv_types": self.request.get_all("disadv_type"),
"d_limit": self.request.get("d_limit"),
"categories": self.request.get_all("cat_type"),
"pa": self.request.get("pa"),
"sa": self.request.get("sa"),
"ta": self.request.get("ta"),
"spell_colleges": self.request.get_all("spell_colleges")
}
def saveParameters(self, data):
"""
"""
# convert python dict syntax to a string
string_data = repr(data)
new_entity = Parameters()
new_entity.parameters = string_data
# save data
new_entity.put()
logging.info(instance_key)
def getParameters(self):
"""
"""
all_data = Parameters.all()
all_data.order("-order")
try:
fd = eval(all_data.fetch(limit=1)[0].parameters)
except IndexError:
fd = None
return fd
handlers = [("/", MainPage)]
application = webapp2.WSGIApplication(handlers, debug=True)
| jtsmith1287/gurpscg | main.py | Python | apache-2.0 | 5,971 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR : MIN
PURPOSE : the deep learning CNN model, similar as inception
VERSION : 0.1
DATE : 4.2017
"""
__author__ = 'Min'
import math
import time
import tensorflow as tf
from datetime import datetime
NUM_CLASSES = 50
slim = tf.contrib.slim
# 产生截断的正太分布
# produce a truncated normal distriubtion
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
# 生成默认参数
# Generate default parameters
def flowerNet_arg_scope(weight_decay = 0.00004, stddev = 0.1,
batch_norm_var_collection = 'moving_vars'):
batch_norm_params = {
# 参数衰减系数
# parameter attenuation coefficient
'decay': 0.9997,
'epsilon': 0.001,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# auto assign default values
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer = slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d],
weights_initializer = tf.truncated_normal_initializer(stddev = stddev),
activation_fn = tf.nn.relu, #Activation function
normalizer_fn = slim.batch_norm,
normalizer_params = batch_norm_params) as scope:
return scope
# 生成网络的卷积 池化部分
# generate convolitonal layer and pooling layer in the CNN
def flowerNet_base(inputs, scope = None):
end_points = {}
with tf.variable_scope(scope, 'Inception', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride = 1, padding = 'VALID'):
# convolutional layer 3x3/2 32
net = slim.conv2d(inputs, 32, [3, 3], stride = 2, scope = 'conv')
# convolutional layer 3x3/1 32
net = slim.conv2d(net, 32, [3, 3], scope = 'conv_1')
# convolutional layer 3x3/1 64
net = slim.conv2d(net, 64, [3, 3], padding= 'SAME',
scope = 'conv_2')
# max pool layer 3x3/2
net = slim.max_pool2d(net, [3, 3], stride = 2, scope = 'pool')
# convolutional layer 1x1/1 80
net = slim.conv2d(net, 80, [1, 1], scope = 'conv_3')
# convolutional layer 3x3/1 192
net = slim.conv2d(net, 192, [3, 3], scope = 'conv_4')
# max pool layer 3,3/2
net = slim.max_pool2d(net, [3, 3], stride = 2, scope = 'pool_1')
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride = 1, padding = 'SAME'):
# mixed module 1
with tf.variable_scope('mixed'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 48, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 64, [5, 5], scope = 'conv1')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv1')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv2')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 32, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 2
with tf.variable_scope('mixed_1'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 48, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 64, [5, 5], scope = 'conv1')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv1')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv2')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 64, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 3
with tf.variable_scope('mixed_2'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 48, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 64, [5, 5], scope = 'conv1')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv1')
branch2 = slim.conv2d(branch2, 96, [3, 3], scope = 'conv2')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 64, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 4
with tf.variable_scope('mixed_3'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 384, [3, 3], stride = 2,
padding = 'VALID', scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 64, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 96, [3, 3], scope = 'conv1')
branch1 = slim.conv2d(branch1, 96, [3, 3], stride = 2,
padding = 'VALID', scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.max_pool2d(net, [3, 3], stride = 2,
padding = 'VALID', scope = 'maxPool')
net = tf.concat([branch0, branch1, branch2], 3)
# mixed module 5
with tf.variable_scope('mixed_4'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 128, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 128, [1, 7], scope = 'conv1')
branch1 = slim.conv2d(branch1, 192, [7, 1], scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 128, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 128, [7, 1], scope = 'conv1')
branch2 = slim.conv2d(branch2, 128, [1, 7], scope = 'conv2')
branch2 = slim.conv2d(branch2, 128, [7, 1], scope = 'conv3')
branch2 = slim.conv2d(branch2, 192, [1, 7], scope = 'conv4')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 6
with tf.variable_scope('mixed_5'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 160, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 160, [1, 7], scope = 'conv1')
branch1 = slim.conv2d(branch1, 192, [7, 1], scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 160, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 160, [7, 1], scope = 'conv1')
branch2 = slim.conv2d(branch2, 160, [1, 7], scope = 'conv2')
branch2 = slim.conv2d(branch2, 160, [7, 1], scope = 'conv3')
branch2 = slim.conv2d(branch2, 192, [1, 7], scope = 'conv4')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 7
with tf.variable_scope('mixed_6'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 160, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 160, [1, 7], scope = 'conv1')
branch1 = slim.conv2d(branch1, 192, [7, 1], scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 160, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 160, [7, 1], scope = 'conv1')
branch2 = slim.conv2d(branch2, 160, [1, 7], scope = 'conv2')
branch2 = slim.conv2d(branch2, 160, [7, 1], scope = 'conv3')
branch2 = slim.conv2d(branch2, 192, [1, 7], scope = 'conv4')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 8
with tf.variable_scope('mixed_7'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 192, [1, 7], scope = 'conv1')
branch1 = slim.conv2d(branch1, 192, [7, 1], scope = 'conv2')
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 192, [7, 1], scope = 'conv1')
branch2 = slim.conv2d(branch2, 192, [1, 7], scope = 'conv2')
branch2 = slim.conv2d(branch2, 192, [7, 1], scope = 'conv3')
branch2 = slim.conv2d(branch2, 192, [1, 7], scope = 'conv4')
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
end_points['mixed_7'] = net
# mixed module 9
with tf.variable_scope('mixed_8'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
branch0 = slim.conv2d(branch0, 320, [3, 3], stride = 2,
padding = 'VALID', scope = 'conv1')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 192, [1, 1], scope = 'conv')
branch1 = slim.conv2d(branch1, 192, [1, 7], scope = 'conv1')
branch1 = slim.conv2d(branch1, 192, [7, 1], scope = 'conv2')
branch1 = slim.conv2d(branch1, 192, [3, 3], stride = 2,
padding = 'VALID', scope = 'conv3')
with tf.variable_scope('branch2'):
branch2 = slim.max_pool2d(net, [3, 3], stride = 2,
padding = 'VALID', scope = 'maxPool')
net = tf.concat([branch0, branch1, branch2], 3)
# mixed module 10
with tf.variable_scope('mixed_9'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 320, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 384, [1, 1], scope = 'conv')
branch1 = tf.concat([
slim.conv2d(branch1, 384, [1, 3], scope = 'conv1'),
slim.conv2d(branch1, 384, [3, 1], scope = 'conv2')], 3)
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 448, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 384, [3, 3], scope = 'conv1')
branch2 = tf.concat([
slim.conv2d(branch2, 384, [1, 3], scope = 'conv2'),
slim.conv2d(branch2, 384, [3, 1], scope = 'conv3')], 3)
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
# mixed module 11
with tf.variable_scope('mixed_10'):
with tf.variable_scope('branch0'):
branch0 = slim.conv2d(net, 320, [1, 1], scope = 'conv')
with tf.variable_scope('branch1'):
branch1 = slim.conv2d(net, 384, [1, 1], scope = 'conv')
branch1 = tf.concat([
slim.conv2d(branch1, 384, [1, 3], scope = 'conv1'),
slim.conv2d(branch1, 384, [3, 1], scope = 'conv2')], 3)
with tf.variable_scope('branch2'):
branch2 = slim.conv2d(net, 448, [1, 1], scope = 'conv')
branch2 = slim.conv2d(branch2, 384, [3, 3], scope = 'conv1')
branch2 = tf.concat([
slim.conv2d(branch2, 384, [1, 3], scope = 'conv2'),
slim.conv2d(branch2, 384, [3, 1], scope = 'conv3')], 3)
with tf.variable_scope('branch3'):
branch3 = slim.avg_pool2d(net, [3, 3], scope = 'avgPool')
branch3 = slim.conv2d(branch3, 192, [1, 1], scope = 'conv')
net = tf.concat([branch0, branch1, branch2, branch3], 3)
return net, end_points
# global avg pool and softmax and logits
def flowerNet(inputs, numClasses, isTraining = True,
dropoutKeepProb = 0.8, predictionFN = slim.softmax,
spatialSqueeze = True, reuse = None, scope = 'flowerNet'):
with tf.variable_scope(scope, 'flowerNet', [inputs, numClasses],
reuse = reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training = isTraining):
net, end_points = flowerNet_base(inputs, scope = scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride = 1, padding = 'SAME'):
aux_logits = end_points['mixed_7']
with tf.variable_scope('AuxLogits'):
aux_logits = slim.avg_pool2d(aux_logits,
[5, 5], stride = 3,
padding = 'VALID', scope = 'avgPool')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope = 'conv')
aux_logits = slim.conv2d(aux_logits, 768, [5, 5],
weights_initializer = trunc_normal(0.01),
padding = 'VALID', scope = 'conv1')
aux_logits = slim.conv2d(aux_logits, numClasses, [1, 1],
activation_fn = None,
normalizer_fn = None,
weights_initializer = trunc_normal(0.001),
scope = 'conv2')
if spatialSqueeze:
aux_logits = tf.squeeze(aux_logits, [1, 2], name = 'SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, [8, 8], padding = 'VALID',
scope = 'avgPool')
net = slim.dropout(net, keep_prob = dropoutKeepProb,
scope = 'dropout')
end_points['PreLogits'] = net
logits = slim.conv2d(net, numClasses, [1, 1], activation_fn = None,
normalizer_fn = None, scope = 'conv')
if spatialSqueeze:
logits = tf.squeeze(logits, [1, 2], name = 'SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = predictionFN(logits, scope = 'Predictions')
return logits, end_points
def time_test(session, target, info_string):
num_steps_burn = 10
total_duration = 0.0
total_duration_squared = 0.0
for i in range(num_batches + num_steps_burn):
startTime = time.time()
_ = session.run(target)
duration = time.time() - startTime
if i >= num_steps_burn:
if not i % 10:
print('%s: step %d, duration = %.3f' % (datetime.now(),
i - num_steps_burn, duration))
total_duration += duration
total_duration_squared += duration * duration
mn = total_duration / num_batches
vr =total_duration_squared / num_batches - mn * mn
sd = math.sqrt(vr)
print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
(datetime.now(), info_string, num_batches, mn, sd))
if __name__ == '__main__':
batchSize = 100
height, weight = 299, 299
inputs = tf.random_uniform((batchSize, height, weight, 3))
with slim.arg_scope(flowerNet_arg_scope()):
logits, end_points = flowerNet(inputs, isTraining = False,
numClasses = NUM_CLASSES)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
num_batches = 100
time_test(sess, logits, "Forward")
#END
| HeavenMin/PlantImageRecognition | deepLearning/flowerNet.py | Python | apache-2.0 | 19,281 |
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from openstack_dashboard.dashboards.fogbow.usage import views
from openstack_dashboard.dashboards.fogbow.usage.views import IndexView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^(?P<member_id>.*)/usage$', views.getSpecificMemberUsage, name='usage'),
)
| fogbow/fogbow-dashboard | openstack_dashboard/dashboards/fogbow/usage/urls.py | Python | apache-2.0 | 404 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module implements client/server messages emitted from plugins.
# Things are defined here to 'encourage' developers to coordinate information
# format. This is also how different data formats are supported
import confluent.exceptions as exc
import json
def _htmlify_structure(indict):
ret = "<ul>"
if isinstance(indict, dict):
for key in indict.iterkeys():
ret += "<li>{0}: ".format(key)
if type(indict[key]) in (str, unicode, float, int):
ret += str(indict[key])
else:
ret += _htmlify_structure(indict[key])
elif isinstance(indict, list):
if len(indict) > 0:
if type(indict[0]) in (str, unicode):
ret += ",".join(indict)
else:
for v in indict:
ret += _htmlify_structure(v)
return ret + '</ul>'
class ConfluentMessage(object):
readonly = False
defaultvalue = ''
defaulttype = 'text'
def __init__(self):
self.desc = ''
self.stripped = False
self.kvpairs = {}
raise NotImplementedError("Must be subclassed!")
def json(self):
# This will create the canonical json representation of this message
if hasattr(self, 'stripped') and self.stripped:
datasource = self.kvpairs
else:
datasource = {'databynode': self.kvpairs}
jsonsnippet = json.dumps(datasource, separators=(',', ':'))[1:-1]
return jsonsnippet
def raw(self):
"""Return pythonic representation of the response.
Used by httpapi while assembling data prior to json serialization"""
if hasattr(self, 'stripped') and self.stripped:
return self.kvpairs
return {'databynode': self.kvpairs}
def strip_node(self, node):
self.stripped = True
if self.kvpairs is not None:
self.kvpairs = self.kvpairs[node]
def html(self, extension=''):
#this is used to facilitate the api explorer feature
if not hasattr(self, 'stripped'):
self.stripped = False
if not hasattr(self, 'notnode'):
self.notnode = False
if self.stripped or self.notnode:
return self._generic_html_value(self.kvpairs)
if not self.stripped:
htmlout = ''
for node in self.kvpairs.iterkeys():
htmlout += '{0}:{1}\n'.format(
node, self._generic_html_value(self.kvpairs[node]))
return htmlout
def _generic_html_value(self, pairs):
snippet = ""
for key in pairs.iterkeys():
val = pairs[key]
value = self.defaultvalue
valtype = self.defaulttype
notes = []
if val is not None and 'value' in val:
value = val['value']
if 'inheritedfrom' in val:
notes.append('Inherited from %s' % val['inheritedfrom'])
if 'expression' in val:
notes.append(
'Derived from expression "%s"' % val['expression'])
elif val is not None and 'expression' in val and 'broken' in val:
value = "*BROKEN*"
notes.append(
'Derived from expression "%s"' % val['expression'])
notes.append('Broken because of %s' % val['broken'])
elif val is not None and 'expression' in val:
value = val['expression']
if value is None:
value = ''
if val is not None and value == '' and 'isset' in val and val[
'isset'] is True:
# an encrypted value, put some *** to show it is set
# in the explorer
if 'inheritedfrom' in val:
notes.append('Inherited from %s' % val['inheritedfrom'])
value = '********'
if isinstance(val, list):
snippet += key + ":"
if len(val) == 0 and not self.readonly:
snippet += ('<input type="{0}" name="{1}" value="" '
' "title="{2}">'
).format(valtype, key, self.desc)
for v in val:
if self.readonly:
snippet += _htmlify_structure(v)
else:
snippet += ('<input type="{0}" name="{1}" value="{2}" '
' "title="{3}">'
).format(valtype, key, v, self.desc)
if not self.readonly:
snippet += (
'<input type="{0}" name="{1}" value="" title="{2}">'
'<input type="checkbox" name="restexplorerhonorkey" '
'value="{1}">').format(valtype, key, self.desc)
return snippet
if self.readonly:
snippet += "{0}: {1}".format(key, value)
else:
snippet += (key + ":" +
'<input type="{0}" name="{1}" value="{2}" '
'title="{3}"><input type="checkbox" '
'name="restexplorerhonorkey" value="{1}">'
).format(valtype, key, value, self.desc)
if len(notes) > 0:
snippet += '(' + ','.join(notes) + ')'
return snippet
class ConfluentNodeError(object):
def __init__(self, node, errorstr):
self.node = node
self.error = errorstr
def raw(self):
return {'databynode': {self.node: {'error': self.error}}}
def html(self):
return self.node + ":" + self.error
def strip_node(self, node):
#NOTE(jbjohnso): For single node errors, raise exception to
#trigger what a developer of that medium would expect
raise Exception(self.error)
class ConfluentTargetTimeout(ConfluentNodeError):
def __init__(self, node, errstr='timeout'):
self.node = node
self.error = errstr
def strip_node(self, node):
raise exc.TargetEndpointUnreachable(self.error)
class ConfluentTargetNotFound(ConfluentNodeError):
def __init__(self, node, errorstr='not found'):
self.node = node
self.error = errorstr
def strip_node(self, node):
raise exc.NotFoundException(self.error)
class ConfluentTargetInvalidCredentials(ConfluentNodeError):
def __init__(self, node):
self.node = node
self.error = 'bad credentials'
def strip_node(self, node):
raise exc.TargetEndpointBadCredentials
class DeletedResource(ConfluentMessage):
def __init__(self, resource):
self.kvpairs = {}
class ConfluentChoiceMessage(ConfluentMessage):
valid_values = set()
valid_paramset = {}
def __init__(self, node, state):
self.stripped = False
self.kvpairs = {
node: {
self.keyname: {'value': state},
}
}
def html(self, extension=''):
if hasattr(self, 'stripped') and self.stripped:
return self._create_option(self.kvpairs)
else:
htmlout = ''
for node in self.kvpairs.iterkeys():
htmlout += '{0}:{1}\n'.format(
node, self._create_option(self.kvpairs[node]))
return htmlout
def _create_option(self, pairdata):
snippet = ''
for key in pairdata.iterkeys():
val = pairdata[key]
snippet += key + ':<select name="%s">' % key
valid_values = self.valid_values
if key in self.valid_paramset:
valid_values = self.valid_paramset[key]
for opt in valid_values:
if opt == val['value']:
snippet += '<option value="%s" selected>%s</option>\r' % (
opt, opt)
else:
snippet += '<option value="%s">%s</option>\r' % (opt, opt)
snippet += '</select>'
snippet += '<input type="checkbox" name="restexplorerhonorkey" '
snippet += 'value="{0}"><br>\r'.format(key)
return snippet
class LinkRelation(ConfluentMessage):
kvpairs = None
def __init__(self):
self.href = ''
self.rel = ''
def json(self):
"""Provide json_hal style representation of the relation.
This currently only makes sense for the socket api.
"""
return {self.rel: '{ "href": "%s" }' % self.href}
def raw(self):
"""Provide python structure of the relation.
This currently is only sensible to consume from httpapi.
"""
return {self.rel: {"href": self.href}}
def html(self, extension=''):
"""Provide an html representation of the link relation.
This is used by the API explorer aspect of httpapi"""
return '<a href="{0}{2}" rel="{1}">{0}{2}</a>'.format(self.href,
self.rel,
extension)
# return '<a href="%s" rel="%s">%s</a><input type="submit"
# name="restexprerorop" value="delete:%s"' % (self.href, self.rel,
# self.href, self.href)
class ChildCollection(LinkRelation):
def __init__(self, collname, candelete=False):
self.rel = 'item'
self.href = collname
self.candelete = candelete
def html(self, extension=''):
if self.candelete:
return (
'<a href="{0}{2}" rel="{1}">{0}{2}</a> . . . . . . . . . . . . '
'<button type="submit" name="restexplorerop" '
'value="delete" formaction="{0}">delete'
'</button>').format(self.href, self.rel, extension)
else:
return '<a href="{0}{1}" rel="{0}">{0}{1}</a>'.format(self.href,
extension)
def get_input_message(path, operation, inputdata, nodes=None):
if path[0] == 'power' and path[1] == 'state' and operation != 'retrieve':
return InputPowerMessage(path, nodes, inputdata)
elif path[0] in ('attributes', 'users') and operation != 'retrieve':
return InputAttributes(path, inputdata, nodes)
elif path == ['boot', 'nextdevice'] and operation != 'retrieve':
return InputBootDevice(path, nodes, inputdata)
elif path == ['identify'] and operation != 'retrieve':
return InputIdentifyMessage(path, nodes, inputdata)
elif inputdata:
raise exc.InvalidArgumentException()
class InputAttributes(ConfluentMessage):
def __init__(self, path, inputdata, nodes=None):
self.nodeattribs = {}
nestedmode = False
if not inputdata:
raise exc.InvalidArgumentException('no request data provided')
if nodes is None:
self.attribs = inputdata
for attrib in self.attribs:
if type(self.attribs[attrib]) in (str, unicode):
try:
# ok, try to use format against the string
# store back result to the attribute to
# handle things like '{{' and '}}'
# if any weird sort of error should
# happen, it means the string has something
# that formatter is looking to fulfill, but
# is unable to do so, meaning it is an expression
tv = self.attribs[attrib].format()
self.attribs[attrib] = tv
except (KeyError, IndexError):
# this means format() actually thought there was work
# that suggested parameters, push it in as an
# expression
self.attribs[attrib] = {
'expression': self.attribs[attrib]}
return
for node in nodes:
if node in inputdata:
nestedmode = True
self.nodeattribs[node] = inputdata[node]
if nestedmode:
for key in inputdata:
if key not in nodes:
raise exc.InvalidArgumentException
else:
for node in nodes:
self.nodeattribs[node] = inputdata
def get_attributes(self, node):
if node not in self.nodeattribs:
return {}
nodeattr = self.nodeattribs[node]
for attr in nodeattr:
if type(nodeattr[attr]) in (str, unicode):
try:
# as above, use format() to see if string follows
# expression, store value back in case of escapes
tv = nodeattr[attr].format()
nodeattr[attr] = tv
except (KeyError, IndexError):
# an expression string will error if format() done
# use that as cue to put it into config as an expr
nodeattr[attr] = {'expression': nodeattr[attr]}
return nodeattr
class ConfluentInputMessage(ConfluentMessage):
keyname = 'state'
def __init__(self, path, nodes, inputdata):
self.inputbynode = {}
self.stripped = False
if not inputdata:
raise exc.InvalidArgumentException('missing input data')
if self.keyname not in inputdata:
#assume we have nested information
for key in nodes:
if key not in inputdata:
raise exc.InvalidArgumentException(key + ' not in request')
datum = inputdata[key]
if self.keyname not in datum:
raise exc.InvalidArgumentException(
'missing {0} argument'.format(self.keyname))
elif datum[self.keyname] not in self.valid_values:
raise exc.InvalidArgumentException(
datum[self.keyname] + ' is not one of ' +
','.join(self.valid_values))
self.inputbynode[key] = datum[self.keyname]
else: # we have a state argument not by node
datum = inputdata
if self.keyname not in datum:
raise exc.InvalidArgumentException('missing {0} argument'.format(self.keyname))
elif datum[self.keyname] not in self.valid_values:
raise exc.InvalidArgumentException(datum[self.keyname] +
' is not one of ' +
','.join(self.valid_values))
for node in nodes:
self.inputbynode[node] = datum[self.keyname]
class InputIdentifyMessage(ConfluentInputMessage):
valid_values = set([
'on',
'off',
])
keyname = 'identify'
class InputPowerMessage(ConfluentInputMessage):
valid_values = set([
'on',
'off',
'reset',
'boot',
])
def powerstate(self, node):
return self.inputbynode[node]
class BootDevice(ConfluentChoiceMessage):
valid_values = set([
'network',
'hd',
'setup',
'default',
'cd',
])
valid_bootmodes = set([
'unspecified',
'bios',
'uefi',
])
valid_paramset = {
'bootmode': valid_bootmodes,
}
def __init__(self, node, device, bootmode='unspecified'):
if device not in self.valid_values:
raise Exception("Invalid boot device argument passed in:" +
repr(device))
if bootmode not in self.valid_bootmodes:
raise Exception("Invalid boot mode argument passed in:" +
repr(bootmode))
self.kvpairs = {
node: {
'nextdevice': {'value': device},
'bootmode': {'value': bootmode },
}
}
class InputBootDevice(BootDevice):
def __init__(self, path, nodes, inputdata):
self.bootdevbynode = {}
self.bootmodebynode = {}
if not inputdata:
raise exc.InvalidArgumentException()
if 'nextdevice' not in inputdata:
for key in nodes:
if key not in inputdata:
raise exc.InvalidArgumentException(key + ' not in request')
datum = inputdata[key]
if 'nextdevice' not in datum:
raise exc.InvalidArgumentException(
'missing nextdevice argument')
elif datum['nextdevice'] not in self.valid_values:
raise exc.InvalidArgumentException(
datum['nextdevice'] + ' is not one of ' +
','.join(self.valid_values))
self.bootdevbynode[key] = datum['nextdevice']
if 'bootmode' in datum:
if datum['bootmode'] not in self.valid_bootmodes:
raise exc.InvalidArgumentException(
datum['bootmode'] + ' is not one of ' +
','.join(self.valid_bootmodes))
self.bootmodebynode[key] = datum['bootmode']
else:
datum = inputdata
if 'nextdevice' not in datum:
raise exc.InvalidArgumentException(
'missing nextdevice argument')
elif datum['nextdevice'] not in self.valid_values:
raise exc.InvalidArgumentException(
datum['nextdevice'] + ' is not one of ' +
','.join(self.valid_values))
for node in nodes:
self.bootdevbynode[node] = datum['nextdevice']
if 'bootmode' in datum:
self.bootmodebynode[node] = datum['bootmode']
def bootdevice(self, node):
return self.bootdevbynode[node]
def bootmode(self, node):
return self.bootmodebynode.get(node, 'unspecified')
class IdentifyState(ConfluentChoiceMessage):
valid_values = set([
'', # allowed for output to indicate write-only support
'on',
'off',
])
keyname = 'identify'
class PowerState(ConfluentChoiceMessage):
valid_values = set([
'on',
'off',
'reset',
'boot',
])
keyname = 'state'
class SensorReadings(ConfluentMessage):
readonly = True
def __init__(self, sensors=(), name=None):
readings = []
self.notnode = name is None
for sensor in sensors:
sensordict = {'name': sensor['name']}
if 'value' in sensor:
sensordict['value'] = sensor['value']
if 'units' in sensor:
sensordict['units'] = sensor['units']
if 'states' in sensor:
sensordict['states'] = sensor['states']
if 'health' in sensor:
sensordict['health'] = sensor['health']
readings.append(sensordict)
if self.notnode:
self.kvpairs = {'sensors': readings}
else:
self.kvpairs = {name: {'sensors': readings}}
class HealthSummary(ConfluentMessage):
readonly = True
valid_values = set([
'ok',
'warning',
'critical',
'failed',
])
def __init__(self, health, name=None):
self.stripped = False
self.notnode = name is None
if health not in self.valid_values:
raise ValueError("%d is not a valid health state" % health)
if self.notnode:
self.kvpairs = {'health': {'value': health}}
else:
self.kvpairs = {name: {'health': {'value': health}}}
class Attributes(ConfluentMessage):
def __init__(self, name=None, kv=None, desc=''):
self.desc = desc
nkv = {}
self.notnode = name is None
for key in kv.iterkeys():
if type(kv[key]) in (str, unicode):
nkv[key] = {'value': kv[key]}
else:
nkv[key] = kv[key]
if self.notnode:
self.kvpairs = nkv
else:
self.kvpairs = {
name: nkv
}
class ListAttributes(ConfluentMessage):
def __init__(self, name=None, kv=None, desc=''):
self.desc = desc
self.notnode = name is None
if self.notnode:
self.kvpairs = kv
else:
self.kvpairs = {name: kv}
class CryptedAttributes(Attributes):
defaulttype = 'password'
def __init__(self, name=None, kv=None, desc=''):
# for now, just keep the dictionary keys and discard crypt value
self.desc = desc
nkv = {}
for key in kv.iterkeys():
nkv[key] = {'isset': False}
try:
if kv[key] is not None and kv[key]['cryptvalue'] != '':
nkv[key] = {'isset': True}
nkv[key]['inheritedfrom'] = kv[key]['inheritedfrom']
except KeyError:
pass
self.notnode = name is None
if self.notnode:
self.kvpairs = nkv
else:
self.kvpairs = {
name: nkv
}
| michaelfardu/thinkconfluent | confluent_server/confluent/messages.py | Python | apache-2.0 | 21,993 |
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler import solvers
from nova.scheduler.solvers.constraints import \
aggregate_image_properties_isolation
from nova import test
from nova.tests.scheduler import solver_scheduler_fakes as fakes
class TestAggregateImagePropertiesIsolationConstraint(test.NoDBTestCase):
def setUp(self):
super(TestAggregateImagePropertiesIsolationConstraint, self).setUp()
self.constraint_cls = aggregate_image_properties_isolation.\
AggregateImagePropertiesIsolationConstraint
self._generate_fake_constraint_input()
def _generate_fake_constraint_input(self):
self.fake_variables = solvers.BaseVariables()
self.fake_variables.host_instance_matrix = [
['h0i0', 'h0i1', 'h0i2'],
['h1i0', 'h1i1', 'h1i2']]
self.fake_filter_properties = {
'instance_uuids': ['fake_uuid_%s' % x for x in range(3)],
'num_instances': 3}
host1 = fakes.FakeSolverSchedulerHostState('host1', 'node1', {})
host2 = fakes.FakeSolverSchedulerHostState('host2', 'node1', {})
self.fake_hosts = [host1, host2]
@mock.patch('nova.scheduler.solvers.constraints.'
'aggregate_image_properties_isolation.'
'AggregateImagePropertiesIsolationConstraint.host_filter_cls')
def test_aggregate_image_properties_isolation_get_components(
self, mock_filter_cls):
expected_cons_vars = [['h1i0'], ['h1i1'], ['h1i2']]
expected_cons_coeffs = [[1], [1], [1]]
expected_cons_consts = [0, 0, 0]
expected_cons_ops = ['==', '==', '==']
mock_filter = mock_filter_cls.return_value
mock_filter.host_passes.side_effect = [True, False]
cons_vars, cons_coeffs, cons_consts, cons_ops = (
self.constraint_cls().get_components(self.fake_variables,
self.fake_hosts, self.fake_filter_properties))
self.assertEqual(expected_cons_vars, cons_vars)
self.assertEqual(expected_cons_coeffs, cons_coeffs)
self.assertEqual(expected_cons_consts, cons_consts)
self.assertEqual(expected_cons_ops, cons_ops)
| CiscoSystems/nova-solver-scheduler | nova/tests/scheduler/solvers/constraints/test_aggregate_image_properties_isolation.py | Python | apache-2.0 | 2,908 |
# Copyright 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class TestCobalt:
def test_stop_signals(self, mocker):
pass
| PressLabs/cobalt | tests/unit/cobalt/test_cobalt.py | Python | apache-2.0 | 650 |
import resume.models as rmod
import random
import logging
from django.http import HttpResponse
from datetime import date
logger = logging.getLogger('default')
def generate(request):
cs_objs = rmod.Department.objects.filter(shortname='cs')
if len(cs_objs) == 0:
logger.info('created cs dept')
cs = rmod.Department(name='Computer Science', shortname='cs', lastChange=0,\
headerImage='', logoImage='', resumeImage='', headerBgImage='',\
brandColor='blue', contactName='Donald Knuth', contactEmail='[email protected]',\
techEmail='[email protected]')
cs.save()
else:
logger.info('used pre-existing cs dept')
cs = cs_objs[0]
ct_objs = rmod.ComponentType.objects.filter(short='ta')
if len(ct_objs) == 0:
logger.info('created component type')
ct = rmod.ComponentType(type='contactlong', name='type a', short='ta', department=cs)
ct.save()
else:
logger.info('used existing component type')
ct = ct_objs[0]
ct_objs = rmod.ComponentType.objects.filter(short='stmt')
if len(ct_objs) == 0:
logger.info('created component type')
ct = rmod.ComponentType(type='statement', name='Research Statement', short='stmt', department=cs)
ct.save()
else:
logger.info('used existing component type')
ct = ct_objs[0]
auth_objs = rmod.AuthInfo.objects.all()
if len(auth_objs) == 0:
return HttpResponse("No auth_info objects to use")
auth = auth_objs[0]
pos_objs = rmod.ApplicantPosition.objects.filter(name='pos1')
if len(pos_objs) == 0:
logger.info('created app position')
pos = rmod.ApplicantPosition(department=cs, name='pos1', shortform='p1',\
autoemail=False)
pos.save()
else:
logger.info('used existing app position')
pos = pos_objs[0]
a_objs = rmod.Applicant.objects.filter(auth=auth)
if len(a_objs) == 0:
logger.error('ERROR: created applicant')
a = rmod.Applicant(auth=auth, firstname='john', lastname='doe', country='usa',\
department=cs, position=pos)
a.save()
else:
logger.info('used existing applicant')
a = a_objs[0]
c_objs = rmod.Component.objects.filter(applicant=a)
if len(c_objs) == 0:
logger.info('created component')
c = rmod.Component(applicant=a, type=ct, value='component 1', lastSubmitted=0,\
department=cs)
c.save()
else:
logger.info('used existing component')
c = c_objs[0]
reviewer_objs = rmod.Reviewer.objects.filter(auth=auth)
if len(reviewer_objs) == 0:
logger.info('created reviewer')
reviewer = rmod.Reviewer(auth=auth, department=cs)
reviewer.save()
else:
logger.info('used existing reviewer')
reviewer = reviewer_objs[0]
review_objs = rmod.Review.objects.filter(applicant=a)
if len(review_objs) == 0:
logger.info('created review')
review = rmod.Review(applicant=a, reviewer=reviewer, advocate='advocate',\
comments='this shit sucks', draft=False, department=cs)
review.save()
else:
logger.info('used existing review')
review = review_objs[0]
area_objs = rmod.Area.objects.filter(department=cs)
if len(area_objs) < 2:
a = rmod.Area(name='area two', abbr='a2', department=cs)
a.save()
a = rmod.Area(name='area one', abbr='a1', department=cs)
a.save()
score_cats = rmod.ScoreCategory.objects.filter(department=cs)
if len(score_cats) == 0:
sc = rmod.ScoreCategory(name='Awesomeness Level', shortform='AL', department=cs)
sc.save()
else:
sc = score_cats[0]
score_vals = rmod.ScoreValue.objects.filter(department=cs)
if len(score_vals) == 0:
for i in range(5):
sv = rmod.ScoreValue(category=sc, number=i, explanation='%d level of awesome' % i,\
department=cs)
sv.save()
return HttpResponse('OK')
| brownplt/k3 | dj-resume/resume/generate.py | Python | apache-2.0 | 3,738 |
import cython
def test_sizeof():
"""
>>> test_sizeof()
True
True
True
True
True
"""
x = cython.declare(cython.bint)
print(cython.sizeof(x) == cython.sizeof(cython.bint))
print(cython.sizeof(cython.char) <= cython.sizeof(cython.short) <= cython.sizeof(cython.int) <= cython.sizeof(cython.long) <= cython.sizeof(cython.longlong))
print(cython.sizeof(cython.uint) == cython.sizeof(cython.int))
print(cython.sizeof(cython.p_int) == cython.sizeof(cython.p_double))
if cython.compiled:
print(cython.sizeof(cython.char) < cython.sizeof(cython.longlong))
else:
print(cython.sizeof(cython.char) == 1)
## CURRENTLY BROKEN - FIXME!!
## def test_declare(n):
## """
## >>> test_declare(100)
## (100, 100)
## >>> test_declare(100.5)
## (100, 100)
## >>> test_declare(None)
## Traceback (most recent call last):
## ...
## TypeError: an integer is required
## """
## x = cython.declare(cython.int)
## y = cython.declare(cython.int, n)
## if cython.compiled:
## cython.declare(xx=cython.int, yy=cython.long)
## i = sizeof(xx)
## ptr = cython.declare(cython.p_int, cython.address(y))
## return y, ptr[0]
@cython.locals(x=cython.double, n=cython.int)
def test_cast(x):
"""
>>> test_cast(1.5)
1
"""
n = cython.cast(cython.int, x)
return n
@cython.locals(x=cython.int, y=cython.p_int)
def test_address(x):
"""
>>> test_address(39)
39
"""
y = cython.address(x)
return y[0]
## CURRENTLY BROKEN - FIXME!!
## @cython.locals(x=cython.int)
## @cython.locals(y=cython.bint)
## def test_locals(x):
## """
## >>> test_locals(5)
## True
## """
## y = x
## return y
def test_with_nogil(nogil):
"""
>>> raised = []
>>> class nogil(object):
... def __enter__(self):
... pass
... def __exit__(self, exc_class, exc, tb):
... raised.append(exc)
... return exc_class is None
>>> test_with_nogil(nogil())
WORKS
True
>>> raised
[None]
"""
result = False
with nogil:
print("WORKS")
with cython.nogil:
result = True
return result
## CURRENTLY BROKEN - FIXME!!
## MyUnion = cython.union(n=cython.int, x=cython.double)
## MyStruct = cython.struct(is_integral=cython.bint, data=MyUnion)
## MyStruct2 = cython.typedef(MyStruct[2])
## def test_struct(n, x):
## """
## >>> test_struct(389, 1.64493)
## (389, 1.64493)
## """
## a = cython.declare(MyStruct2)
## a[0] = MyStruct(True, data=MyUnion(n=n))
## a[1] = MyStruct(is_integral=False, data={'x': x})
## return a[0].data.n, a[1].data.x
import cython as cy
from cython import declare, cast, locals, address, typedef, p_void, compiled
from cython import declare as my_declare, locals as my_locals, p_void as my_void_star, typedef as my_typedef, compiled as my_compiled
@my_locals(a=cython.p_void)
def test_imports():
"""
>>> test_imports() # (True, True)
True
"""
a = cython.NULL
b = declare(p_void, cython.NULL)
c = my_declare(my_void_star, cython.NULL)
d = cy.declare(cy.p_void, cython.NULL)
## CURRENTLY BROKEN - FIXME!!
#return a == d, compiled == my_compiled
return compiled == my_compiled
## CURRENTLY BROKEN - FIXME!!
## MyStruct3 = typedef(MyStruct[3])
## MyStruct4 = my_typedef(MyStruct[4])
## MyStruct5 = cy.typedef(MyStruct[5])
def test_declare_c_types(n):
"""
>>> test_declare_c_types(0)
>>> test_declare_c_types(1)
>>> test_declare_c_types(2)
"""
#
b00 = cython.declare(cython.bint, 0)
b01 = cython.declare(cython.bint, 1)
b02 = cython.declare(cython.bint, 2)
#
i00 = cython.declare(cython.uchar, n)
i01 = cython.declare(cython.char, n)
i02 = cython.declare(cython.schar, n)
i03 = cython.declare(cython.ushort, n)
i04 = cython.declare(cython.short, n)
i05 = cython.declare(cython.sshort, n)
i06 = cython.declare(cython.uint, n)
i07 = cython.declare(cython.int, n)
i08 = cython.declare(cython.sint, n)
i09 = cython.declare(cython.slong, n)
i10 = cython.declare(cython.long, n)
i11 = cython.declare(cython.ulong, n)
i12 = cython.declare(cython.slonglong, n)
i13 = cython.declare(cython.longlong, n)
i14 = cython.declare(cython.ulonglong, n)
i20 = cython.declare(cython.Py_ssize_t, n)
i21 = cython.declare(cython.size_t, n)
#
f00 = cython.declare(cython.float, n)
f01 = cython.declare(cython.double, n)
f02 = cython.declare(cython.longdouble, n)
#
#z00 = cython.declare(cython.complex, n+1j)
#z01 = cython.declare(cython.floatcomplex, n+1j)
#z02 = cython.declare(cython.doublecomplex, n+1j)
#z03 = cython.declare(cython.longdoublecomplex, n+1j)
| bzzzz/cython | tests/run/pure_py.py | Python | apache-2.0 | 4,851 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import mock
import requests
GITHUB_TRACKER = "dci.trackers.github.requests"
BUGZILLA_TRACKER = "dci.trackers.bugzilla.requests"
def test_attach_issue_to_job(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
issue = user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data).data
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data
assert result["issues"][0]["id"] == issue["issue"]["id"]
assert result["issues"][0]["url"] == data["url"]
def test_attach_issue_to_component(admin, user, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_user_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
component_id = pc["component"]["id"]
gc = user.get("/api/v1/components/%s" % component_id).data
assert gc["component"]["name"] == "pname"
assert gc["component"]["state"] == "active"
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
admin.post("/api/v1/components/%s/issues" % component_id, data=data)
result = user.get("/api/v1/components/%s/issues" % component_id).data
assert result["issues"][0]["url"] == data["url"]
def test_attach_invalid_issue(admin, job_user_id, topic_user_id):
data = {"url": '<script>alert("booo")</script>', "topic_id": topic_user_id}
r = admin.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
assert r.status_code == 400
def test_unattach_issue_from_job(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
result = user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
issue_id = result.data["issue"]["id"]
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data
assert result["_meta"]["count"] == 1
user.delete("/api/v1/jobs/%s/issues/%s" % (job_user_id, issue_id))
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data
assert result["_meta"]["count"] == 0
def test_unattach_issue_from_component(admin, user, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_user_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
component_id = pc["component"]["id"]
gc = user.get("/api/v1/components/%s" % component_id).data
assert gc["component"]["name"] == "pname"
assert gc["component"]["state"] == "active"
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
result = admin.post("/api/v1/components/%s/issues" % component_id, data=data)
issue_id = result.data["issue"]["id"]
result = user.get("/api/v1/components/%s/issues" % component_id).data
assert result["_meta"]["count"] == 1
user.delete("/api/v1/components/%s/issues/%s" % (component_id, issue_id))
result = user.get("/api/v1/components/%s/issues" % component_id).data
assert result["_meta"]["count"] == 0
def test_github_tracker(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 200
mock_github_result.json.return_value = {
"number": 1, # issue_id
"title": "Create a GET handler for /componenttype/<ct_name>",
"user": {"login": "Spredzy"}, # reporter
"assignee": None,
"state": "closed", # status
"product": "redhat-cip",
"component": "dci-control-server",
"created_at": "2015-12-09T09:29:26Z",
"updated_at": "2015-12-18T15:19:41Z",
"closed_at": "2015-12-18T15:19:41Z",
}
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data["issues"][0]
assert result["status_code"] == 200
assert result["issue_id"] == 1
assert result["title"] == ("Create a GET handler for /componenttype/<ct_name>")
assert result["reporter"] == "Spredzy"
assert result["status"] == "closed"
assert result["product"] == "redhat-cip"
assert result["component"] == "dci-control-server"
assert result["created_at"] == "2015-12-09T09:29:26Z"
assert result["updated_at"] == "2015-12-18T15:19:41Z"
assert result["closed_at"] == "2015-12-18T15:19:41Z"
assert result["assignee"] is None
def test_github_tracker_with_private_issue(user, job_user_id, topic_user_id):
with mock.patch(GITHUB_TRACKER, spec=requests) as mock_github_request:
mock_github_result = mock.Mock()
mock_github_request.get.return_value = mock_github_result
mock_github_result.status_code = 404
data = {
"url": "https://github.com/redhat-cip/dci-control-server/issues/1",
"topic_id": topic_user_id,
}
user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data["issues"][0]
assert result["status_code"] == 404
assert result["issue_id"] == 1
assert result["title"] == "private issue"
assert result["reporter"] is None
assert result["status"] is None
assert result["product"] == "redhat-cip"
assert result["component"] == "dci-control-server"
assert result["created_at"] is None
assert result["updated_at"] is None
assert result["closed_at"] is None
assert result["assignee"] is None
def test_bugzilla_tracker(user, job_user_id, topic_user_id):
with mock.patch(BUGZILLA_TRACKER, spec=requests) as mock_bugzilla_request:
mock_bugzilla_result = mock.Mock()
mock_bugzilla_request.get.return_value = mock_bugzilla_result
mock_bugzilla_result.status_code = 200
mock_bugzilla_result.content = """
<bugzilla version="4.4.12051.1"
urlbase="https://bugzilla.redhat.com/"
maintainer="[email protected]" >
<bug>
<bug_id>1184949</bug_id>
<creation_ts>2015-01-22 09:46:00 -0500</creation_ts>
<short_desc>Timeouts in haproxy for keystone can be</short_desc>
<delta_ts>2016-06-29 18:50:43 -0400</delta_ts>
<product>Red Hat OpenStack</product>
<component>rubygem-staypuft</component>
<bug_status>NEW</bug_status>
<reporter name="Alfredo Moralejo">amoralej</reporter>
<assigned_to name="Mike Burns">mburns</assigned_to>
</bug>
</bugzilla>
"""
data = {
"url": "https://bugzilla.redhat.com/show_bug.cgi?id=1184949",
"topic_id": topic_user_id,
}
user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data["issues"][0]
assert result["status_code"] == 200
assert result["issue_id"] == "1184949"
assert result["title"] == "Timeouts in haproxy for keystone can be"
assert result["reporter"] == "amoralej"
assert result["assignee"] == "mburns"
assert result["status"] == "NEW"
assert result["product"] == "Red Hat OpenStack"
assert result["component"] == "rubygem-staypuft"
assert result["created_at"] == "2015-01-22 09:46:00 -0500"
assert result["updated_at"] == "2016-06-29 18:50:43 -0400"
assert result["closed_at"] is None
def test_bugzilla_tracker_with_non_existent_issue(user, job_user_id, topic_user_id):
with mock.patch(BUGZILLA_TRACKER, spec=requests) as mock_bugzilla_request:
mock_bugzilla_result = mock.Mock()
mock_bugzilla_request.get.return_value = mock_bugzilla_result
mock_bugzilla_result.status_code = 400
data = {
"url": "https://bugzilla.redhat.com/show_bug.cgi?id=1184949",
"topic_id": topic_user_id,
}
user.post("/api/v1/jobs/%s/issues" % job_user_id, data=data)
result = user.get("/api/v1/jobs/%s/issues" % job_user_id).data["issues"][0]
assert result["status_code"] == 400
assert result["issue_id"] is None
assert result["title"] is None
assert result["reporter"] is None
assert result["assignee"] is None
assert result["status"] is None
assert result["product"] is None
assert result["component"] is None
assert result["created_at"] is None
assert result["updated_at"] is None
assert result["closed_at"] is None
def test_create_get_issues(user, topic_user_id):
issues = user.get("/api/v1/issues")
assert issues.data["issues"] == []
pissue = user.post(
"/api/v1/issues", data={"url": "http://bugzilla/42", "topic_id": topic_user_id}
)
assert pissue.status_code == 201
pissue_id1 = pissue.data["issue"]["id"]
pissue = user.post(
"/api/v1/issues", data={"url": "http://bugzilla/43", "topic_id": topic_user_id}
)
assert pissue.status_code == 201
pissue_id2 = pissue.data["issue"]["id"]
issues = user.get("/api/v1/issues")
assert len(issues.data["issues"]) == 2
assert set([pissue_id1, pissue_id2]) == {i["id"] for i in issues.data["issues"]}
g_issue_id1 = user.get("/api/v1/issues/%s" % pissue_id1)
assert g_issue_id1.status_code == 200
assert g_issue_id1.data["issue"]["url"] == "http://bugzilla/42"
g_issue_id2 = user.get("/api/v1/issues/%s" % pissue_id2)
assert g_issue_id2.status_code == 200
assert g_issue_id2.data["issue"]["url"] == "http://bugzilla/43"
def test_delete_issues(admin, user, topic_user_id):
pissue = user.post(
"/api/v1/issues", data={"url": "http://bugzilla/42", "topic_id": topic_user_id}
)
assert pissue.status_code == 201
pissue_id1 = pissue.data["issue"]["id"]
issues = user.get("/api/v1/issues")
assert len(issues.data["issues"]) == 1
pissue_etag = pissue.headers.get("ETag")
dissue = admin.delete(
"/api/v1/issues/%s" % pissue_id1, headers={"If-match": pissue_etag}
)
assert dissue.status_code == 204
issues = user.get("/api/v1/issues")
assert len(issues.data["issues"]) == 0
# test issues - tests
def test_crd_test_to_issue(admin, user, topic_user_id):
pissue = user.post(
"/api/v1/issues", data={"url": "http://bugzilla/42", "topic_id": topic_user_id}
)
pissue_id1 = pissue.data["issue"]["id"]
test = user.post("/api/v1/tests", data={"name": "pname1"})
test_id1 = test.data["test"]["id"]
# 0 tests from issues pissue_id1
issues_tests = user.get("/api/v1/issues/%s/tests" % pissue_id1)
assert len(issues_tests.data["tests"]) == 0
# associate test_id1 to issue pissue_id1
ptest = user.post(
"/api/v1/issues/%s/tests" % pissue_id1, data={"test_id": test_id1}
)
assert ptest.status_code == 201
issues_tests = user.get("/api/v1/issues/%s/tests" % pissue_id1)
assert len(issues_tests.data["tests"]) == 1
assert issues_tests.data["tests"][0]["id"] == test_id1
# remove test_id1 from issue pissue_id1
admin.delete("/api/v1/issues/%s/tests/%s" % (pissue_id1, test_id1))
issues_tests = user.get("/api/v1/issues/%s/tests" % pissue_id1)
assert len(issues_tests.data["tests"]) == 0
| redhat-cip/dci-control-server | tests/api/v1/test_issues.py | Python | apache-2.0 | 15,798 |
import os
from dataclasses import dataclass
from typing import Iterable
from dbt.contracts.graph.manifest import SourceFile
from dbt.contracts.graph.parsed import ParsedSqlNode, ParsedMacro
from dbt.contracts.graph.unparsed import UnparsedMacro
from dbt.exceptions import InternalException
from dbt.node_types import NodeType
from dbt.parser.base import SimpleSQLParser
from dbt.parser.macros import MacroParser
from dbt.parser.search import FileBlock
@dataclass
class SqlBlock(FileBlock):
block_name: str
@property
def name(self):
return self.block_name
class SqlBlockParser(SimpleSQLParser[ParsedSqlNode]):
def parse_from_dict(self, dct, validate=True) -> ParsedSqlNode:
if validate:
ParsedSqlNode.validate(dct)
return ParsedSqlNode.from_dict(dct)
@property
def resource_type(self) -> NodeType:
return NodeType.SqlOperation
@staticmethod
def get_compiled_path(block: FileBlock):
# we do it this way to make mypy happy
if not isinstance(block, SqlBlock):
raise InternalException(
'While parsing SQL operation, got an actual file block instead of '
'an SQL block: {}'.format(block)
)
return os.path.join('sql', block.name)
def parse_remote(self, sql: str, name: str) -> ParsedSqlNode:
source_file = SourceFile.remote(sql, self.project.project_name)
contents = SqlBlock(block_name=name, file=source_file)
return self.parse_node(contents)
class SqlMacroParser(MacroParser):
def parse_remote(self, contents) -> Iterable[ParsedMacro]:
base = UnparsedMacro(
path='from remote system',
original_file_path='from remote system',
package_name=self.project.project_name,
raw_sql=contents,
root_path=self.project.project_root,
resource_type=NodeType.Macro,
)
for node in self.parse_unparsed_macros(base):
yield node
| analyst-collective/dbt | core/dbt/parser/sql.py | Python | apache-2.0 | 2,013 |
"""Test the Balboa Spa Client config flow."""
from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.balboa.const import CONF_SYNC_TIME, DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import BalboaMock
from tests.common import MockConfigEntry
TEST_DATA = {
CONF_HOST: "1.1.1.1",
}
TEST_ID = "FakeBalboa"
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
new=BalboaMock.connect,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.disconnect",
new=BalboaMock.disconnect,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.listen",
new=BalboaMock.listen,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.send_mod_ident_req",
new=BalboaMock.send_mod_ident_req,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.send_panel_req",
new=BalboaMock.send_panel_req,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.spa_configured",
new=BalboaMock.spa_configured,
), patch(
"homeassistant.components.balboa.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == TEST_DATA
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
new=BalboaMock.broken_connect,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.disconnect",
new=BalboaMock.disconnect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_unknown_error(hass: HomeAssistant) -> None:
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_already_configured(hass: HomeAssistant) -> None:
"""Test when provided credentials are already configured."""
MockConfigEntry(domain=DOMAIN, data=TEST_DATA, unique_id=TEST_ID).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
with patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.connect",
new=BalboaMock.connect,
), patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi.disconnect",
new=BalboaMock.disconnect,
), patch(
"homeassistant.components.balboa.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
async def test_options_flow(hass):
"""Test specifying non default settings using options flow."""
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_DATA, unique_id=TEST_ID)
config_entry.add_to_hass(hass)
# Rather than mocking out 15 or so functions, we just need to mock
# the entire library, otherwise it will get stuck in a listener and
# the various loops in pybalboa.
with patch(
"homeassistant.components.balboa.config_flow.BalboaSpaWifi",
new=BalboaMock,
), patch(
"homeassistant.components.balboa.BalboaSpaWifi",
new=BalboaMock,
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SYNC_TIME: True},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {CONF_SYNC_TIME: True}
| jawilson/home-assistant | tests/components/balboa/test_config_flow.py | Python | apache-2.0 | 5,737 |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import functools
import uuid
from dogpile.cache import api
from dogpile.cache import region as dp_region
import six
from keystone.common.cache.backends import mongo
from keystone import exception
from keystone.tests import unit as tests
# Mock database structure sample where 'ks_cache' is database and
# 'cache' is collection. Dogpile CachedValue data is divided in two
# fields `value` (CachedValue.payload) and `meta` (CachedValue.metadata)
ks_cache = {
"cache": [
{
"value": {
"serviceType": "identity",
"allVersionsUrl": "https://dummyUrl",
"dateLastModified": "ISODDate(2014-02-08T18:39:13.237Z)",
"serviceName": "Identity",
"enabled": "True"
},
"meta": {
"v": 1,
"ct": 1392371422.015121
},
"doc_date": "ISODate('2014-02-14T09:50:22.015Z')",
"_id": "8251dc95f63842719c077072f1047ddf"
},
{
"value": "dummyValueX",
"meta": {
"v": 1,
"ct": 1392371422.014058
},
"doc_date": "ISODate('2014-02-14T09:50:22.014Z')",
"_id": "66730b9534d146f0804d23729ad35436"
}
]
}
COLLECTIONS = {}
SON_MANIPULATOR = None
class MockCursor(object):
def __init__(self, collection, dataset_factory):
super(MockCursor, self).__init__()
self.collection = collection
self._factory = dataset_factory
self._dataset = self._factory()
self._limit = None
self._skip = None
def __iter__(self):
return self
def __next__(self):
if self._skip:
for _ in range(self._skip):
next(self._dataset)
self._skip = None
if self._limit is not None and self._limit <= 0:
raise StopIteration()
if self._limit is not None:
self._limit -= 1
return next(self._dataset)
next = __next__
def __getitem__(self, index):
arr = [x for x in self._dataset]
self._dataset = iter(arr)
return arr[index]
class MockCollection(object):
def __init__(self, db, name):
super(MockCollection, self).__init__()
self.name = name
self._collection_database = db
self._documents = {}
self.write_concern = {}
def __getattr__(self, name):
if name == 'database':
return self._collection_database
def ensure_index(self, key_or_list, *args, **kwargs):
pass
def index_information(self):
return {}
def find_one(self, spec_or_id=None, *args, **kwargs):
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, collections.Mapping):
spec_or_id = {'_id': spec_or_id}
try:
return next(self.find(spec_or_id, *args, **kwargs))
except StopIteration:
return None
def find(self, spec=None, *args, **kwargs):
return MockCursor(self, functools.partial(self._get_dataset, spec))
def _get_dataset(self, spec):
dataset = (self._copy_doc(document, dict) for document in
self._iter_documents(spec))
return dataset
def _iter_documents(self, spec=None):
return (SON_MANIPULATOR.transform_outgoing(document, self) for
document in six.itervalues(self._documents)
if self._apply_filter(document, spec))
def _apply_filter(self, document, query):
for key, search in six.iteritems(query):
doc_val = document.get(key)
if isinstance(search, dict):
op_dict = {'$in': lambda dv, sv: dv in sv}
is_match = all(
op_str in op_dict and op_dict[op_str](doc_val, search_val)
for op_str, search_val in six.iteritems(search)
)
else:
is_match = doc_val == search
return is_match
def _copy_doc(self, obj, container):
if isinstance(obj, list):
new = []
for item in obj:
new.append(self._copy_doc(item, container))
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
new[key] = self._copy_doc(value, container)
return new
else:
return copy.copy(obj)
def insert(self, data, manipulate=True, **kwargs):
if isinstance(data, list):
return [self._insert(element) for element in data]
return self._insert(data)
def save(self, data, manipulate=True, **kwargs):
return self._insert(data)
def _insert(self, data):
if '_id' not in data:
data['_id'] = uuid.uuid4().hex
object_id = data['_id']
self._documents[object_id] = self._internalize_dict(data)
return object_id
def find_and_modify(self, spec, document, upsert=False, **kwargs):
self.update(spec, document, upsert, **kwargs)
def update(self, spec, document, upsert=False, **kwargs):
existing_docs = [doc for doc in six.itervalues(self._documents)
if self._apply_filter(doc, spec)]
if existing_docs:
existing_doc = existing_docs[0] # should find only 1 match
_id = existing_doc['_id']
existing_doc.clear()
existing_doc['_id'] = _id
existing_doc.update(self._internalize_dict(document))
elif upsert:
existing_doc = self._documents[self._insert(document)]
def _internalize_dict(self, d):
return dict((k, copy.deepcopy(v)) for k, v in six.iteritems(d))
def remove(self, spec_or_id=None, search_filter=None):
"""Remove objects matching spec_or_id from the collection."""
if spec_or_id is None:
spec_or_id = search_filter if search_filter else {}
if not isinstance(spec_or_id, dict):
spec_or_id = {'_id': spec_or_id}
to_delete = list(self.find(spec=spec_or_id))
for doc in to_delete:
doc_id = doc['_id']
del self._documents[doc_id]
return {
"connectionId": uuid.uuid4().hex,
"n": len(to_delete),
"ok": 1.0,
"err": None,
}
class MockMongoDB(object):
def __init__(self, dbname):
self._dbname = dbname
self.mainpulator = None
def authenticate(self, username, password):
pass
def add_son_manipulator(self, manipulator):
global SON_MANIPULATOR
SON_MANIPULATOR = manipulator
def __getattr__(self, name):
if name == 'authenticate':
return self.authenticate
elif name == 'name':
return self._dbname
elif name == 'add_son_manipulator':
return self.add_son_manipulator
else:
return get_collection(self._dbname, name)
def __getitem__(self, name):
return get_collection(self._dbname, name)
class MockMongoClient(object):
def __init__(self, *args, **kwargs):
pass
def __getattr__(self, dbname):
return MockMongoDB(dbname)
def get_collection(db_name, collection_name):
mongo_collection = MockCollection(MockMongoDB(db_name), collection_name)
return mongo_collection
def pymongo_override():
global pymongo
import pymongo
if pymongo.MongoClient is not MockMongoClient:
pymongo.MongoClient = MockMongoClient
if pymongo.MongoReplicaSetClient is not MockMongoClient:
pymongo.MongoClient = MockMongoClient
class MyTransformer(mongo.BaseTransform):
"""Added here just to check manipulator logic is used correctly."""
def transform_incoming(self, son, collection):
return super(MyTransformer, self).transform_incoming(son, collection)
def transform_outgoing(self, son, collection):
return super(MyTransformer, self).transform_outgoing(son, collection)
class MongoCache(tests.BaseTestCase):
def setUp(self):
super(MongoCache, self).setUp()
global COLLECTIONS
COLLECTIONS = {}
mongo.MongoApi._DB = {}
mongo.MongoApi._MONGO_COLLS = {}
pymongo_override()
# using typical configuration
self.arguments = {
'db_hosts': 'localhost:27017',
'db_name': 'ks_cache',
'cache_collection': 'cache',
'username': 'test_user',
'password': 'test_password'
}
def test_missing_db_hosts(self):
self.arguments.pop('db_hosts')
region = dp_region.make_region()
self.assertRaises(exception.ValidationError, region.configure,
'keystone.cache.mongo',
arguments=self.arguments)
def test_missing_db_name(self):
self.arguments.pop('db_name')
region = dp_region.make_region()
self.assertRaises(exception.ValidationError, region.configure,
'keystone.cache.mongo',
arguments=self.arguments)
def test_missing_cache_collection_name(self):
self.arguments.pop('cache_collection')
region = dp_region.make_region()
self.assertRaises(exception.ValidationError, region.configure,
'keystone.cache.mongo',
arguments=self.arguments)
def test_incorrect_write_concern(self):
self.arguments['w'] = 'one value'
region = dp_region.make_region()
self.assertRaises(exception.ValidationError, region.configure,
'keystone.cache.mongo',
arguments=self.arguments)
def test_correct_write_concern(self):
self.arguments['w'] = 1
region = dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue10")
# There is no proxy so can access MongoCacheBackend directly
self.assertEqual(region.backend.api.w, 1)
def test_incorrect_read_preference(self):
self.arguments['read_preference'] = 'inValidValue'
region = dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
# As per delayed loading of pymongo, read_preference value should
# still be string and NOT enum
self.assertEqual(region.backend.api.read_preference,
'inValidValue')
random_key = uuid.uuid4().hex
self.assertRaises(ValueError, region.set,
random_key, "dummyValue10")
def test_correct_read_preference(self):
self.arguments['read_preference'] = 'secondaryPreferred'
region = dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
# As per delayed loading of pymongo, read_preference value should
# still be string and NOT enum
self.assertEqual(region.backend.api.read_preference,
'secondaryPreferred')
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue10")
# Now as pymongo is loaded so expected read_preference value is enum.
# There is no proxy so can access MongoCacheBackend directly
self.assertEqual(region.backend.api.read_preference, 3)
def test_missing_replica_set_name(self):
self.arguments['use_replica'] = True
region = dp_region.make_region()
self.assertRaises(exception.ValidationError, region.configure,
'keystone.cache.mongo',
arguments=self.arguments)
def test_provided_replica_set_name(self):
self.arguments['use_replica'] = True
self.arguments['replicaset_name'] = 'my_replica'
dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
self.assertTrue(True) # reached here means no initialization error
def test_incorrect_mongo_ttl_seconds(self):
self.arguments['mongo_ttl_seconds'] = 'sixty'
region = dp_region.make_region()
self.assertRaises(exception.ValidationError, region.configure,
'keystone.cache.mongo',
arguments=self.arguments)
def test_cache_configuration_values_assertion(self):
self.arguments['use_replica'] = True
self.arguments['replicaset_name'] = 'my_replica'
self.arguments['mongo_ttl_seconds'] = 60
self.arguments['ssl'] = False
region = dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
# There is no proxy so can access MongoCacheBackend directly
self.assertEqual(region.backend.api.hosts, 'localhost:27017')
self.assertEqual(region.backend.api.db_name, 'ks_cache')
self.assertEqual(region.backend.api.cache_collection, 'cache')
self.assertEqual(region.backend.api.username, 'test_user')
self.assertEqual(region.backend.api.password, 'test_password')
self.assertEqual(region.backend.api.use_replica, True)
self.assertEqual(region.backend.api.replicaset_name, 'my_replica')
self.assertEqual(region.backend.api.conn_kwargs['ssl'], False)
self.assertEqual(region.backend.api.ttl_seconds, 60)
def test_multiple_region_cache_configuration(self):
arguments1 = copy.copy(self.arguments)
arguments1['cache_collection'] = 'cache_region1'
region1 = dp_region.make_region().configure('keystone.cache.mongo',
arguments=arguments1)
# There is no proxy so can access MongoCacheBackend directly
self.assertEqual(region1.backend.api.hosts, 'localhost:27017')
self.assertEqual(region1.backend.api.db_name, 'ks_cache')
self.assertEqual(region1.backend.api.cache_collection, 'cache_region1')
self.assertEqual(region1.backend.api.username, 'test_user')
self.assertEqual(region1.backend.api.password, 'test_password')
# Should be None because of delayed initialization
self.assertIsNone(region1.backend.api._data_manipulator)
random_key1 = uuid.uuid4().hex
region1.set(random_key1, "dummyValue10")
self.assertEqual("dummyValue10", region1.get(random_key1))
# Now should have initialized
self.assertIsInstance(region1.backend.api._data_manipulator,
mongo.BaseTransform)
class_name = '%s.%s' % (MyTransformer.__module__, "MyTransformer")
arguments2 = copy.copy(self.arguments)
arguments2['cache_collection'] = 'cache_region2'
arguments2['son_manipulator'] = class_name
region2 = dp_region.make_region().configure('keystone.cache.mongo',
arguments=arguments2)
# There is no proxy so can access MongoCacheBackend directly
self.assertEqual(region2.backend.api.hosts, 'localhost:27017')
self.assertEqual(region2.backend.api.db_name, 'ks_cache')
self.assertEqual(region2.backend.api.cache_collection, 'cache_region2')
# Should be None because of delayed initialization
self.assertIsNone(region2.backend.api._data_manipulator)
random_key = uuid.uuid4().hex
region2.set(random_key, "dummyValue20")
self.assertEqual("dummyValue20", region2.get(random_key))
# Now should have initialized
self.assertIsInstance(region2.backend.api._data_manipulator,
MyTransformer)
region1.set(random_key1, "dummyValue22")
self.assertEqual("dummyValue22", region1.get(random_key1))
def test_typical_configuration(self):
dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
self.assertTrue(True) # reached here means no initialization error
def test_backend_get_missing_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, region.get(random_key))
def test_backend_set_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue")
self.assertEqual("dummyValue", region.get(random_key))
def test_backend_set_data_with_string_as_valid_ttl(self):
self.arguments['mongo_ttl_seconds'] = '3600'
region = dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
self.assertEqual(region.backend.api.ttl_seconds, 3600)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue")
self.assertEqual("dummyValue", region.get(random_key))
def test_backend_set_data_with_int_as_valid_ttl(self):
self.arguments['mongo_ttl_seconds'] = 1800
region = dp_region.make_region().configure('keystone.cache.mongo',
arguments=self.arguments)
self.assertEqual(region.backend.api.ttl_seconds, 1800)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue")
self.assertEqual("dummyValue", region.get(random_key))
def test_backend_set_none_as_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
region.set(random_key, None)
self.assertIsNone(region.get(random_key))
def test_backend_set_blank_as_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
region.set(random_key, "")
self.assertEqual("", region.get(random_key))
def test_backend_set_same_key_multiple_times(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue")
self.assertEqual("dummyValue", region.get(random_key))
dict_value = {'key1': 'value1'}
region.set(random_key, dict_value)
self.assertEqual(dict_value, region.get(random_key))
region.set(random_key, "dummyValue2")
self.assertEqual("dummyValue2", region.get(random_key))
def test_backend_multi_set_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
random_key1 = uuid.uuid4().hex
random_key2 = uuid.uuid4().hex
random_key3 = uuid.uuid4().hex
mapping = {random_key1: 'dummyValue1',
random_key2: 'dummyValue2',
random_key3: 'dummyValue3'}
region.set_multi(mapping)
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, region.get(random_key))
self.assertFalse(region.get(random_key))
self.assertEqual("dummyValue1", region.get(random_key1))
self.assertEqual("dummyValue2", region.get(random_key2))
self.assertEqual("dummyValue3", region.get(random_key3))
def test_backend_multi_get_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
random_key1 = uuid.uuid4().hex
random_key2 = uuid.uuid4().hex
random_key3 = uuid.uuid4().hex
mapping = {random_key1: 'dummyValue1',
random_key2: '',
random_key3: 'dummyValue3'}
region.set_multi(mapping)
keys = [random_key, random_key1, random_key2, random_key3]
results = region.get_multi(keys)
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, results[0])
self.assertEqual("dummyValue1", results[1])
self.assertEqual("", results[2])
self.assertEqual("dummyValue3", results[3])
def test_backend_multi_set_should_update_existing(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
random_key1 = uuid.uuid4().hex
random_key2 = uuid.uuid4().hex
random_key3 = uuid.uuid4().hex
mapping = {random_key1: 'dummyValue1',
random_key2: 'dummyValue2',
random_key3: 'dummyValue3'}
region.set_multi(mapping)
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, region.get(random_key))
self.assertEqual("dummyValue1", region.get(random_key1))
self.assertEqual("dummyValue2", region.get(random_key2))
self.assertEqual("dummyValue3", region.get(random_key3))
mapping = {random_key1: 'dummyValue4',
random_key2: 'dummyValue5'}
region.set_multi(mapping)
self.assertEqual(api.NO_VALUE, region.get(random_key))
self.assertEqual("dummyValue4", region.get(random_key1))
self.assertEqual("dummyValue5", region.get(random_key2))
self.assertEqual("dummyValue3", region.get(random_key3))
def test_backend_multi_set_get_with_blanks_none(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
random_key1 = uuid.uuid4().hex
random_key2 = uuid.uuid4().hex
random_key3 = uuid.uuid4().hex
random_key4 = uuid.uuid4().hex
mapping = {random_key1: 'dummyValue1',
random_key2: None,
random_key3: '',
random_key4: 'dummyValue4'}
region.set_multi(mapping)
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, region.get(random_key))
self.assertEqual("dummyValue1", region.get(random_key1))
self.assertIsNone(region.get(random_key2))
self.assertEqual("", region.get(random_key3))
self.assertEqual("dummyValue4", region.get(random_key4))
keys = [random_key, random_key1, random_key2, random_key3, random_key4]
results = region.get_multi(keys)
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, results[0])
self.assertEqual("dummyValue1", results[1])
self.assertIsNone(results[2])
self.assertEqual("", results[3])
self.assertEqual("dummyValue4", results[4])
mapping = {random_key1: 'dummyValue5',
random_key2: 'dummyValue6'}
region.set_multi(mapping)
self.assertEqual(api.NO_VALUE, region.get(random_key))
self.assertEqual("dummyValue5", region.get(random_key1))
self.assertEqual("dummyValue6", region.get(random_key2))
self.assertEqual("", region.get(random_key3))
def test_backend_delete_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue")
self.assertEqual("dummyValue", region.get(random_key))
region.delete(random_key)
# should return NO_VALUE as key no longer exists in cache
self.assertEqual(api.NO_VALUE, region.get(random_key))
def test_backend_multi_delete_data(self):
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
random_key = uuid.uuid4().hex
random_key1 = uuid.uuid4().hex
random_key2 = uuid.uuid4().hex
random_key3 = uuid.uuid4().hex
mapping = {random_key1: 'dummyValue1',
random_key2: 'dummyValue2',
random_key3: 'dummyValue3'}
region.set_multi(mapping)
# should return NO_VALUE as key does not exist in cache
self.assertEqual(api.NO_VALUE, region.get(random_key))
self.assertEqual("dummyValue1", region.get(random_key1))
self.assertEqual("dummyValue2", region.get(random_key2))
self.assertEqual("dummyValue3", region.get(random_key3))
self.assertEqual(api.NO_VALUE, region.get("InvalidKey"))
keys = mapping.keys()
region.delete_multi(keys)
self.assertEqual(api.NO_VALUE, region.get("InvalidKey"))
# should return NO_VALUE as keys no longer exist in cache
self.assertEqual(api.NO_VALUE, region.get(random_key1))
self.assertEqual(api.NO_VALUE, region.get(random_key2))
self.assertEqual(api.NO_VALUE, region.get(random_key3))
def test_additional_crud_method_arguments_support(self):
"""Additional arguments should works across find/insert/update."""
self.arguments['wtimeout'] = 30000
self.arguments['j'] = True
self.arguments['continue_on_error'] = True
self.arguments['secondary_acceptable_latency_ms'] = 60
region = dp_region.make_region().configure(
'keystone.cache.mongo',
arguments=self.arguments
)
# There is no proxy so can access MongoCacheBackend directly
api_methargs = region.backend.api.meth_kwargs
self.assertEqual(api_methargs['wtimeout'], 30000)
self.assertEqual(api_methargs['j'], True)
self.assertEqual(api_methargs['continue_on_error'], True)
self.assertEqual(api_methargs['secondary_acceptable_latency_ms'], 60)
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue1")
self.assertEqual("dummyValue1", region.get(random_key))
region.set(random_key, "dummyValue2")
self.assertEqual("dummyValue2", region.get(random_key))
random_key = uuid.uuid4().hex
region.set(random_key, "dummyValue3")
self.assertEqual("dummyValue3", region.get(random_key))
| UTSA-ICS/keystone-kerberos | keystone/tests/unit/test_cache_backend_mongo.py | Python | apache-2.0 | 27,559 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
import errno
import itertools
import logging
import os
import posixpath
from boto.exception import S3ResponseError
from boto.s3.key import Key
from boto.s3.prefix import Prefix
from django.utils.translation import ugettext as _
from aws import s3
from aws.s3 import normpath, s3file, translate_s3_error, S3_ROOT
from aws.s3.s3stat import S3Stat
DEFAULT_READ_SIZE = 1024 * 1024 # 1MB
LOG = logging.getLogger(__name__)
class S3FileSystem(object):
def __init__(self, s3_connection):
self._s3_connection = s3_connection
self._bucket_cache = None
def _init_bucket_cache(self):
if self._bucket_cache is None:
buckets = self._s3_connection.get_all_buckets()
self._bucket_cache = {}
for bucket in buckets:
self._bucket_cache[bucket.name] = bucket
def _get_bucket(self, name):
self._init_bucket_cache()
if name not in self._bucket_cache:
self._bucket_cache[name] = self._s3_connection.get_bucket(name)
return self._bucket_cache[name]
def _get_or_create_bucket(self, name):
try:
bucket = self._get_bucket(name)
except S3ResponseError, e:
if e.status == 404:
bucket = self._s3_connection.create_bucket(name)
self._bucket_cache[name] = bucket
else:
raise e
return bucket
def _get_key(self, path, validate=True):
bucket_name, key_name = s3.parse_uri(path)[:2]
bucket = self._get_bucket(bucket_name)
try:
return bucket.get_key(key_name, validate=validate)
except:
e, exc, tb = sys.exc_info()
raise ValueError(e)
def _stats(self, path):
if s3.is_root(path):
return S3Stat.for_s3_root()
try:
key = self._get_key(path, validate=True)
except S3ResponseError as e:
if e.status == 404:
return None
else:
exc_class, exc, tb = sys.exc_info()
raise exc_class, exc, tb
if key is None:
key = self._get_key(path, validate=False)
return self._stats_key(key)
@staticmethod
def _stats_key(key):
if key.size is not None:
is_directory_name = not key.name or key.name[-1] == '/'
return S3Stat.from_key(key, is_dir=is_directory_name)
else:
key.name = S3FileSystem._append_separator(key.name)
ls = key.bucket.get_all_keys(prefix=key.name, max_keys=1)
if len(ls) > 0:
return S3Stat.from_key(key, is_dir=True)
return None
@staticmethod
def _append_separator(path):
if path and not path.endswith('/'):
path += '/'
return path
@staticmethod
def _cut_separator(path):
return path.endswith('/') and path[:-1] or path
@staticmethod
def isroot(path):
return s3.is_root(path)
@staticmethod
def join(*comp_list):
return s3.join(*comp_list)
@staticmethod
def normpath(path):
return normpath(path)
@staticmethod
def parent_path(path):
parent_dir = S3FileSystem._append_separator(path)
if not s3.is_root(parent_dir):
bucket_name, key_name, basename = s3.parse_uri(path)
if not basename: # bucket is top-level so return root
parent_dir = S3_ROOT
else:
bucket_path = '%s%s' % (S3_ROOT, bucket_name)
key_path = '/'.join(key_name.split('/')[:-1])
parent_dir = s3.abspath(bucket_path, key_path)
return parent_dir
@translate_s3_error
def open(self, path, mode='r'):
key = self._get_key(path, validate=True)
if key is None:
raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
return s3file.open(key, mode=mode)
@translate_s3_error
def read(self, path, offset, length):
fh = self.open(path, 'r')
fh.seek(offset, os.SEEK_SET)
return fh.read(length)
@translate_s3_error
def isfile(self, path):
stat = self._stats(path)
if stat is None:
return False
return not stat.isDir
@translate_s3_error
def isdir(self, path):
stat = self._stats(path)
if stat is None:
return False
return stat.isDir
@translate_s3_error
def exists(self, path):
return self._stats(path) is not None
@translate_s3_error
def stats(self, path):
path = normpath(path)
stats = self._stats(path)
if stats:
return stats
raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
@translate_s3_error
def listdir_stats(self, path, glob=None):
if glob is not None:
raise NotImplementedError(_("Option `glob` is not implemented"))
if s3.is_root(path):
self._init_bucket_cache()
return [S3Stat.from_bucket(b) for b in self._bucket_cache.values()]
bucket_name, prefix = s3.parse_uri(path)[:2]
bucket = self._get_bucket(bucket_name)
prefix = self._append_separator(prefix)
res = []
for item in bucket.list(prefix=prefix, delimiter='/'):
if isinstance(item, Prefix):
res.append(S3Stat.from_key(Key(item.bucket, item.name), is_dir=True))
else:
if item.name == prefix:
continue
res.append(self._stats_key(item))
return res
def listdir(self, path, glob=None):
return [s3.parse_uri(x.path)[2] for x in self.listdir_stats(path, glob)]
@translate_s3_error
def rmtree(self, path, skipTrash=False):
if not skipTrash:
raise NotImplementedError(_('Moving to trash is not implemented for S3'))
bucket_name, key_name = s3.parse_uri(path)[:2]
if bucket_name and not key_name:
raise NotImplementedError(_('Deleting a bucket is not implemented for S3'))
key = self._get_key(path, validate=False)
if key.exists():
to_delete = iter([key])
else:
to_delete = iter([])
if self.isdir(path):
# add `/` to prevent removing of `s3://b/a_new` trying to remove `s3://b/a`
prefix = self._append_separator(key.name)
keys = key.bucket.list(prefix=prefix)
to_delete = itertools.chain(keys, to_delete)
result = key.bucket.delete_keys(to_delete)
if result.errors:
msg = "%d errors occurred during deleting '%s':\n%s" % (
len(result.errors),
'\n'.join(map(repr, result.errors)))
LOG.error(msg)
raise IOError(msg)
@translate_s3_error
def remove(self, path, skip_trash=False):
if not skip_trash:
raise NotImplementedError(_('Moving to trash is not implemented for S3'))
key = self._get_key(path, validate=False)
key.bucket.delete_key(key.name)
def restore(self, *args, **kwargs):
raise NotImplementedError(_('Moving to trash is not implemented for S3'))
@translate_s3_error
def mkdir(self, path, *args, **kwargs):
"""
Creates a directory and any parent directory if necessary.
Actually it creates an empty object: s3://[bucket]/[path]/
"""
bucket_name, key_name = s3.parse_uri(path)[:2]
self._get_or_create_bucket(bucket_name)
stats = self._stats(path)
if stats:
if stats.isDir:
return None
else:
raise IOError(errno.ENOTDIR, "'%s' already exists and is not a directory" % path)
path = self._append_separator(path) # folder-key should ends by /
self.create(path) # create empty object
@translate_s3_error
def copy(self, src, dst, recursive=False, *args, **kwargs):
self._copy(src, dst, recursive=recursive, use_src_basename=True)
@translate_s3_error
def copyfile(self, src, dst, *args, **kwargs):
if self.isdir(dst):
raise IOError(errno.EINVAL, "Copy dst '%s' is a directory" % dst)
self._copy(src, dst, recursive=False, use_src_basename=False)
@translate_s3_error
def copy_remote_dir(self, src, dst, *args, **kwargs):
self._copy(src, dst, recursive=True, use_src_basename=False)
def _copy(self, src, dst, recursive, use_src_basename):
src_st = self.stats(src)
if src_st.isDir and not recursive:
return # omitting directory
dst = s3.abspath(src, dst)
dst_st = self._stats(dst)
if src_st.isDir and dst_st and not dst_st.isDir:
raise IOError(errno.EEXIST, "Cannot overwrite non-directory '%s' with directory '%s'" % (dst, src))
src_bucket, src_key = s3.parse_uri(src)[:2]
dst_bucket, dst_key = s3.parse_uri(dst)[:2]
keep_src_basename = use_src_basename and dst_st and dst_st.isDir
src_bucket = self._get_bucket(src_bucket)
dst_bucket = self._get_bucket(dst_bucket)
if keep_src_basename:
cut = len(posixpath.dirname(src_key)) # cut of an parent directory name
if cut:
cut += 1
else:
cut = len(src_key)
if not src_key.endswith('/'):
cut += 1
for key in src_bucket.list(prefix=src_key):
if not key.name.startswith(src_key):
raise RuntimeError(_("Invalid key to transform: %s") % key.name)
dst_name = posixpath.normpath(s3.join(dst_key, key.name[cut:]))
key.copy(dst_bucket, dst_name)
@translate_s3_error
def rename(self, old, new):
new = s3.abspath(old, new)
self.copy(old, new, recursive=True)
self.rmtree(old, skipTrash=True)
@translate_s3_error
def rename_star(self, old_dir, new_dir):
if not self.isdir(old_dir):
raise IOError(errno.ENOTDIR, "'%s' is not a directory" % old_dir)
if self.isfile(new_dir):
raise IOError(errno.ENOTDIR, "'%s' is not a directory" % new_dir)
ls = self.listdir(old_dir)
for entry in ls:
self.rename(s3.join(old_dir, entry), s3.join(new_dir, entry))
@translate_s3_error
def create(self, path, overwrite=False, data=None):
key = self._get_key(path, validate=False)
key.set_contents_from_string(data or '', replace=overwrite)
@translate_s3_error
def copyFromLocal(self, local_src, remote_dst, *args, **kwargs):
local_src = self._cut_separator(local_src)
remote_dst = self._cut_separator(remote_dst)
def _copy_file(src, dst):
key = self._get_key(dst, validate=False)
fp = open(src, 'r')
key.set_contents_from_file(fp)
if os.path.isdir(local_src):
for (local_dir, sub_dirs, files) in os.walk(local_src, followlinks=False):
remote_dir = local_dir.replace(local_src, remote_dst)
if not sub_dirs and not files:
self.mkdir(remote_dir)
else:
for file_name in files:
_copy_file(os.path.join(local_dir, file_name), os.path.join(remote_dir, file_name))
else:
file_name = os.path.split(local_src)[1]
if self.isdir(remote_dst):
remote_file = os.path.join(remote_dst, file_name)
else:
remote_file = remote_dst
_copy_file(local_src, remote_file)
def setuser(self, user):
pass # user-concept doesn't have sense for this implementation
| jjmleiro/hue | desktop/libs/aws/src/aws/s3/s3fs.py | Python | apache-2.0 | 11,345 |
# Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module implements the combination of the eHata and ITM models
# according to the requirements developed in the Winnforum WG1 Propagation
# task group.
import math
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ehata import ehata
from itm import pytm
from geo import tropoClim
from geo import refractivity
from geo import ned_indexer
from geo import nlcd_indexer
from geo import land_use
from geo import vincenty
# f in MHz; d and h1/h2 all in meters
def FreeSpacePathLoss(f, d, h1, h2):
r = math.sqrt(d*d + (h1-h2)*(h1-h2))
return 20*math.log10(r) + 20*math.log10(f) - 27.56
class PropagationLossModel:
def __init__(self, itu_dir, ned_dir, nlcd_dir):
self.climIndx = tropoClim.ClimateIndexer(itu_dir)
self.refractivityIndx = refractivity.RefractivityIndexer(itu_dir)
self.nedIndx = ned_indexer.NedIndexer(ned_dir)
self.nlcdIndx = nlcd_indexer.NlcdIndexer(nlcd_dir)
# Calculate the ITM adjusted propagation loss given the
# assumptions on the ITM model.
def ITM_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, reliability):
dielectric_constant = 25.0 # good ground
soil_conductivity = 0.02 # good ground
polarization = 1
confidence = 0.5
# get surface refractivity and radio climate from path midpoint
dist, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
lat_c, lng_c, alpha2 = vincenty.to_dist_bear_vincenty(lat1, lng1, dist/2.0, bearing)
print 'Midpoint = %f, %f' % (lat_c, lng_c)
radio_climate = self.climIndx.TropoClim(lat_c, lng_c)
refractivity = self.refractivityIndx.Refractivity(lat_c, lng_c)
print 'Using climate %d' % radio_climate
print 'Using refractivity %f' % refractivity
print 'Using freq %f' % f
profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
print profile[0], profile[1]
#print profile
print 'total distance is ', profile[0]*profile[1]
loss = pytm.point_to_point(profile, h1, h2,
dielectric_constant,
soil_conductivity,
refractivity,
f,
radio_climate,
polarization,
confidence,
reliability)
print 'ITM P2P is ', loss
return loss
# Adjusted propagation loss according to the adjustments in R2-SGN-04
# distance d, heights h1, h2 all in meters
# frequency f in MHz
def ExtendedHata_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, land_cat):
d, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
d = d*1000.0
print 'EHata distance=', d
if d <= 100.0:
# return FSPL
print 'FSPL'
return FreeSpacePathLoss(f, d, h1, h2)
if d > 100.0 and d <= 1000.0:
print 'interp FSPL and ehata'
# interpolate FSPL and ehata
fspl_loss = FreeSpacePathLoss(f, 100.0, h1, h2)
print ' fspl_loss=', fspl_loss
ehata_loss, abm = ehata.ExtendedHata_MedianBasicPropLoss(f, 1.0, h1, h2, land_cat)
print ' ehata_loss=', ehata_loss
print ' ( abm=', abm
return fspl_loss + (1.0 + math.log10(d/1000.0))*(ehata_loss - fspl_loss)
if d > 1000.0 and d < 80000.0:
# return eHata value without adjustment.
print 'EHata only for d=%f' % d
profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
return ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile)
if d >= 80000.0:
print 'EHata for distance %f > 80km' % d
# Derive profile_80km
lat_80, lng_80, heading = vincenty.to_dist_bear_vincenty(lat1, lng1, 80.0, bearing)
print '80km point is %f %f' % (lat_80, lng_80)
profile_80km = self.nedIndx.Profile(lat1, lng1, lat_80, lng_80)
# Find J adjustment...
ehata_loss = ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile_80km)
itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat_80, lng_80, h2, f, 0.5)
J = ehata_loss - itm_loss
print 'Got ehata=%f itm=%f J=%f' % (ehata_loss, itm_loss, J)
if J < 0.0:
J = 0.0
return self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5) + J
def LandClassification(self, lat, lng):
code = self.nlcdIndx.NlcdCode(lat, lng)
return self.nlcdIndx.NlcdLandCategory(code)
# This is the oracle for propagation loss from point 1 to point 2 at frequency f (Mhz).
def PropagationLoss(self, f, lat1, lng1, h1, lat2, lng2, h2, land_cat=''):
if land_cat == '':
code = self.nlcdIndx.NlcdCode(lat2, lng2)
if code == 11:
code = self.nlcdIndx.NlcdCode(lat1, lng1)
land_cat = land_use.NlcdLandCategory(code)
print 'Using land_cat =', land_cat
# Calculate effective heights of tx and rx:
profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
h1eff, h2eff = EffectiveHeights(h1, h2, profile)
if land_cat == 'RURAL' or h1eff >= 200: # Only h1eff (CBSD effective height) counts
itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
print 'Returning itm_loss for rural > 200: ', itm_loss
return itm_loss
else:
itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
ehata_loss = self.ExtendedHata_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, land_cat)
if ehata_loss > itm_loss:
return ehata_loss
return itm_loss
# Run directly, takes args of "lat1, lng1, h1, lat2, lng2, h2, f" and prints the
# (median) propagation loss in dB.
if __name__ == '__main__':
dir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(os.path.dirname(dir))
ituDir = os.path.join(os.path.join(rootDir, 'data'), 'itu')
nedDir = os.path.join(os.path.join(rootDir, 'data'), 'ned')
nlcdDir = os.path.join(os.path.join(rootDir, 'data'), 'nlcd')
prop = PropagationLossModel(ituDir, nedDir, nlcdDir)
loss = prop.PropagationLoss(float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]),
float(sys.argv[4]), float(sys.argv[5]), float(sys.argv[6]),
float(sys.argv[7]))
print 'Propagation Loss = ', loss, ' dB'
| gregbillock/Spectrum-Access-System | src/prop/model.py | Python | apache-2.0 | 6,985 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for VariableMgr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import operator
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
PS_SHADOW_VAR_PREFIX = 'ps_var'
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
if infs or nans occur in the gradients, we immeditately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
loss_scale: a tf.Variable represneting the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
# loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
# TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
name='cond_if_grad_has_inf_nan'
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
Note that variable creation only happen when building the model graph on the
first device (see how it sets the 'reuse' parameter in
VariableMgr.*.create_outer_variable_scope()). That means, for all other
devices, the variable scope will reuse the variables created before, which
requires that we set the caching_device correctly as otherwise it may not be
able to find the previously created variable and will create a new one. This
requires when building the model graph on different devices, variables with
the same name should have same size.
TODO(laigd): consider adding tests or verification logic to enforce this, or
refactor it.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIBLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
def __call__(self, getter, name, *args, **kwargs):
if name.startswith(PS_SHADOW_VAR_PREFIX):
return getter(*args, **kwargs)
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = [tf.GraphKeys.GLOBAL_VARIABLES]
else:
collections = collections[:]
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.append(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
return getter(name, *args, **kwargs)
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""Initializer for ParamServerDevicSetter.
Args:
worker_device: the device to use for computer ops.
ps_devices: a list of device to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
class StagedModelVariable(object):
"""Staging variable wrapper that decouples reads and updates.
This class represents a variable through a staging buffer. Reads from this
variable directly gets from the staging buffer. Updates are stacked into
another staging buffer, and will be processed later.
"""
def __init__(self, real_var, var_stage_get, variable_mgr):
"""Initializer for the model variables through a staging buffer.
Args:
real_var: the underlying real variable.
var_stage_get: the read op from the staging buffer.
variable_mgr: the parent variable-manager.
"""
self.real_var = real_var
self.var_stage_get = var_stage_get
self.variable_mgr = variable_mgr
def _value(self):
"""The read access of this variable. The content from the staging buffer."""
return self.var_stage_get
def _ref(self):
"""Return the underlying variable ref, required by tf.colocate_with."""
return self.real_var._ref() # pylint: disable=protected-access
def read_value(self):
"""Mimics tf.Variable.read_value()."""
return tf.identity(self.var_stage_get, name='read')
@property
def dtype(self):
"""Return the non-reference dtype."""
return self.var_stage_get.dtype
def assign_sub(self, delta, name=None, read_value=True):
"""Mimic the updates to the variable.
Args:
delta: is pushed into a staging buffer and will be pumped later.
name: currently ignored; names of ops and the StagingArea are
computed without using this pass name.
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
The actual updates. The colocation constraint will be reapplied.
"""
# This parameter is ignored: the StagingArea only supports setting
# the shared name, not the names of individual ops it uses.
del name
# colocate_with(None, True) clears the colocation constraints.
# Push the delta into a staging buffer.
with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
delta_staging_area = data_flow_ops.StagingArea(
[self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
delta_put_op = delta_staging_area.put([delta])
self.variable_mgr.staging_delta_ops.append(delta_put_op)
delta_get_op = delta_staging_area.get()[0]
# Return the actual updates. The colocation constraint will be reapplied.
return self.real_var.assign_sub(delta_get_op, read_value=read_value)
@staticmethod
# pylint: disable=bad-staticmethod-argument,invalid-name
def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
"""Utility function for converting a StagedModelVariable to a Tensor."""
del dtype, name # unused: this function returns the cached ref or value.
if as_ref:
return self._ref()
else:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
Instead of a caching device, this getter tracks where the variable is used.
And on each device, it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
# This helps copying the weights from the parameter to this server only
# once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = data_flow_ops.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
# For trainable variables, they are managed separatedly through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether the returned variables is writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
# The following two functions are copied from
# tensorflow/python/eager/backprop.py. We do not directly use them as they are
# not exported and subject to change at any time.
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return ops.IndexedSlices(g.values, array_ops.gather(grad.indices,
g.indices),
g.dense_shape)
def aggregate_indexed_slices_gradients(grads):
"""Aggregates gradients containing `IndexedSlices`s."""
if len(grads) < 1:
return None
elif len(grads) == 1:
return grads[0]
else:
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a dense tensor
# object.
if any(isinstance(g, ops.Tensor) for g in grads):
return math_ops.add_n(grads)
# The following `_as_indexed_slices_list` casts ids of IndexedSlices into
# int64. It is to make sure the inputs of `concat` all have same the data
# type.
grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
grads = [flatten_nested_indexed_slices(x) for x in grads]
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = ops.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. The has_nan_or_inf indicates the grads has nan or inf.
"""
grads = [g for g, _ in grad_and_vars]
if any(isinstance(g, tf.IndexedSlices) for g in grads):
# TODO(reedwm): All-reduce IndexedSlices more effectively.
grad = aggregate_indexed_slices_gradients(grads)
else:
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.scalar_mul(1.0 / len(grads), grad)
v = grad_and_vars[0][1]
if check_inf_nan:
with tf.name_scope('check_for_inf_and_nan'):
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
# This class is copied from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/contrib/training/python/training/device_setter.py#L56.
# We copy it since contrib has been removed from TensorFlow.
class GreedyLoadBalancingStrategy(object):
"""Returns the least-loaded ps task for op placement.
The load is calculated by a user-specified load function passed in at
construction. There are no units for load, and the load function is
responsible for providing an internally consistent measure.
Note that this strategy is very sensitive to the exact order in which
ps ops (typically variables) are created, as it greedily places ops
on the least-loaded ps at the point each op is processed.
One reasonable heuristic is the `byte_size_load_fn`, which
estimates load as the number of bytes that would be used to store and
transmit the entire variable. More advanced load functions
could consider the difference in access patterns across ops, or trade
off CPU-intensive ops with RAM-intensive ops with network bandwidth.
This class is intended to be used as a `ps_strategy` in
`tf.compat.v1.train.replica_device_setter`.
"""
def __init__(self, num_tasks, load_fn):
"""Create a new `LoadBalancingStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
load_fn: A callable that takes an `Operation` and returns a
numeric load value for that op.
"""
self._num_tasks = num_tasks
self._load_fn = load_fn
self._ps_loads = np.zeros(num_tasks)
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: A `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Greedily
places the op on the least-loaded ps task so far, as determined
by the load function.
"""
task = np.argmin(self._ps_loads)
self._ps_loads[task] += self._load_fn(op)
return task
# This function is copied from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/contrib/training/python/training/device_setter.py#L105.
# We copy it since contrib has been removed from TensorFlow.
def byte_size_load_fn(op):
"""Load function that computes the byte size of a single-output `Operation`.
This is intended to be used with `"Variable"` ops, which have a single
`Tensor` output with the contents of the variable. However, it can also be
used for calculating the size of any op that has a single output.
Intended to be used with `GreedyLoadBalancingStrategy`.
Args:
op: An `Operation` with a single output, typically a "Variable" op.
Returns:
The number of bytes in the output `Tensor`.
Raises:
ValueError: if `op` does not have a single output, or if the shape of the
single output is not fully-defined.
"""
if len(op.outputs) != 1:
raise ValueError('Op %s must have a single output' % op)
output = op.outputs[0]
elem_size = output.dtype.size
shape = output.get_shape()
if not shape.is_fully_defined():
# Due to legacy behavior, scalar "Variable" ops have output Tensors that
# have unknown shape when the op is created (and hence passed to this
# load function for placement), even though the scalar shape is set
# explicitly immediately afterward.
shape = tensor_shape.TensorShape(op.get_attr('shape'))
shape.assert_is_fully_defined()
return shape.num_elements() * elem_size
| tensorflow/benchmarks | scripts/tf_cnn_benchmarks/variable_mgr_util.py | Python | apache-2.0 | 26,469 |
from bson import ObjectId
import jsonschema
import numpy
from girder.exceptions import ValidationException
from girder.models.file import File
from girder.models.model_base import Model
from girder.models.upload import Upload
from girder.utility.acl_mixin import AccessControlMixin
from .image import Image
from .segmentation_helpers import ScikitSegmentationHelper
from .study import Study
from .user import User
class Annotation(AccessControlMixin, Model):
def initialize(self):
self.name = 'annotation'
self.ensureIndices(['studyId', 'imageId', 'userId'])
# TODO: resourceColl should be ['study', 'isic_archive'], but upstream support is unclear
self.resourceColl = 'folder'
self.resourceParent = 'studyId'
def createAnnotation(self, study, image, user):
annotation = self.save({
'studyId': study['_id'],
'imageId': image['_id'],
'userId': user['_id'],
'startTime': None,
'stopTime': None,
'status': None,
'log': [],
'responses': {},
'markups': {},
})
return annotation
def getState(self, annotation):
return (Study().State.COMPLETE
if annotation['stopTime'] is not None
else Study().State.ACTIVE)
def _superpixelsToMasks(self, superpixelValues, image):
possibleSuperpixelNums = numpy.array([
superpixelNum
for superpixelNum, featureValue
in enumerate(superpixelValues)
if featureValue == 0.5
])
definiteSuperpixelNums = numpy.array([
superpixelNum
for superpixelNum, featureValue
in enumerate(superpixelValues)
if featureValue == 1.0
])
superpixelsLabelData = Image().superpixelsData(image)
possibleMask = numpy.in1d(
superpixelsLabelData.flat,
possibleSuperpixelNums
).reshape(superpixelsLabelData.shape)
possibleMask = possibleMask.astype(numpy.bool_)
definiteMask = numpy.in1d(
superpixelsLabelData.flat,
definiteSuperpixelNums
).reshape(superpixelsLabelData.shape)
definiteMask = definiteMask.astype(numpy.bool_)
return possibleMask, definiteMask
def _superpixelsToMaskMarkup(self, superpixelValues, image):
possibleMask, definiteMask = self._superpixelsToMasks(superpixelValues, image)
markupMask = numpy.zeros(possibleMask.shape, dtype=numpy.uint8)
markupMask[possibleMask] = 128
markupMask[definiteMask] = 255
return markupMask
def saveSuperpixelMarkup(self, annotation, featureId, superpixelValues):
image = Image().load(annotation['imageId'], force=True, exc=True)
annotator = User().load(annotation['userId'], force=True, exc=True)
markupMask = self._superpixelsToMaskMarkup(superpixelValues, image)
markupMaskEncodedStream = ScikitSegmentationHelper.writeImage(markupMask, 'png')
markupFile = Upload().uploadFromFile(
obj=markupMaskEncodedStream,
size=len(markupMaskEncodedStream.getvalue()),
name='annotation_%s_%s.png' % (
annotation['_id'],
# Rename features to ensure the file is downloadable on Windows
featureId.replace(' : ', ' ; ').replace('/', ',')
),
# TODO: change this once a bug in upstream Girder is fixed
parentType='annotation',
parent=annotation,
attachParent=True,
user=annotator,
mimeType='image/png'
)
markupFile['superpixels'] = superpixelValues
# TODO: remove this once a bug in upstream Girder is fixed
markupFile['attachedToType'] = ['annotation', 'isic_archive']
markupFile = File().save(markupFile)
annotation['markups'][featureId] = {
'fileId': markupFile['_id'],
'present': bool(markupMask.any())
}
return Annotation().save(annotation)
def getMarkupFile(self, annotation, featureId, includeSuperpixels=False):
if featureId in annotation['markups']:
markupFile = File().load(
annotation['markups'][featureId]['fileId'],
force=True,
exc=True,
fields={'superpixels': includeSuperpixels}
)
return markupFile
else:
return None
def renderMarkup(self, annotation, featureId):
image = Image().load(annotation['imageId'], force=True, exc=True)
renderData = Image().imageData(image)
markupFile = Annotation().getMarkupFile(annotation, featureId)
if markupFile:
markupMask = Image()._decodeDataFromFile(markupFile)
else:
image = Image().load(annotation['imageId'], force=True, exc=True)
markupMask = numpy.zeros(
(
image['meta']['acquisition']['pixelsY'],
image['meta']['acquisition']['pixelsX']
),
dtype=numpy.uint8
)
possibleMask = markupMask == 128
definiteMask = markupMask == 255
POSSIBLE_OVERLAY_COLOR = numpy.array([250, 250, 0])
DEFINITE_OVERLAY_COLOR = numpy.array([0, 0, 255])
renderData[possibleMask] = POSSIBLE_OVERLAY_COLOR
renderData[definiteMask] = DEFINITE_OVERLAY_COLOR
return renderData
def filter(self, annotation, user=None, additionalKeys=None):
output = {
'_id': annotation['_id'],
'_modelType': 'annotation',
'studyId': annotation['studyId'],
'image': Image().filterSummary(
Image().load(annotation['imageId'], force=True, exc=True),
user),
'user': User().filterSummary(
user=User().load(annotation['userId'], force=True, exc=True),
accessorUser=user),
'state': Annotation().getState(annotation)
}
if Annotation().getState(annotation) == Study().State.COMPLETE:
output.update({
'status': annotation['status'],
'startTime': annotation['startTime'],
'stopTime': annotation['stopTime'],
'responses': annotation['responses'],
'markups': {
featureId: markup['present']
for featureId, markup
in annotation['markups'].items()
},
'log': annotation.get('log', [])
})
return output
def filterSummary(self, annotation, user=None):
return {
'_id': annotation['_id'],
'studyId': annotation['studyId'],
'userId': annotation['userId'],
'imageId': annotation['imageId'],
'state': self.getState(annotation)
}
def remove(self, annotation, **kwargs):
for featureId in annotation['markups'].keys():
File().remove(self.getMarkupFile(annotation, featureId))
return super(Annotation, self).remove(annotation)
def validate(self, doc): # noqa C901
for field in ['studyId', 'userId', 'imageId']:
if not isinstance(doc.get(field), ObjectId):
raise ValidationException(f'Annotation field "{field}" must be an ObjectId')
study = Study().load(doc['studyId'], force=True, exc=False)
if not study:
raise ValidationException(
'Annotation field "studyId" must reference an existing Study.')
# If annotation is complete
if doc.get('stopTime'):
schema = {
# '$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'annotation',
'type': 'object',
'properties': {
'_id': {
# TODO
},
'studyId': {
# TODO
},
'imageId': {
# TODO
},
'userId': {
# TODO
},
'startTime': {
# TODO
},
'stopTime': {
# TODO
},
'status': {
'type': 'string',
'enum': ['ok', 'phi', 'quality', 'zoom', 'inappropriate', 'other']
},
'responses': {
'type': 'object',
'properties': {
question['id']: {
'type': 'string',
# TODO: Support non-'select' question types
'enum': question['choices']
}
for question in study['meta']['questions']
},
'additionalProperties': False
},
'markups': {
'type': 'object',
'properties': {
feature['id']: {
'type': 'object',
'properties': {
'fileId': {
# TODO
},
'present': {
'type': 'boolean'
}
},
'required': ['fileId', 'present'],
'additionalProperties': False
}
for feature in study['meta']['features']
},
'additionalProperties': False
},
'log': {
# TODO
}
},
'required': [
'_id', 'studyId', 'imageId', 'userId', 'startTime', 'stopTime', 'status',
'responses', 'markups', 'log'
],
'additionalProperties': False
}
try:
jsonschema.validate(doc, schema)
except jsonschema.ValidationError as e:
raise ValidationException(f'Invalid annotation: {str(e)}')
return doc
| ImageMarkup/isic-archive | isic_archive/models/annotation.py | Python | apache-2.0 | 10,692 |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lexer import RqlLexer, RqlSyntaxError
import random
import math
DEFAULT_CONSTANTS = {
'PI': 3.1415926535897932384,
'NULL': None,
'FALSE': False,
'TRUE': True,
}
DEFAULT_FUNCTIONS = {
'abs': abs,
'min': min,
'max': max,
'sum': lambda *args: sum(args),
'acos': math.acos,
'asin': math.asin,
'atan': math.atan,
'ceil': math.ceil,
'cos': math.cos,
'exp': math.exp,
'floor': math.floor,
'log': math.log,
'random': random.random,
'sin': math.sin,
'sqrt': math.sqrt,
'tan': math.tan
}
# http://technet.microsoft.com/en-us/library/ms190276.aspx
EXPRESSION_OPERATORS = [
{'+', '-', '&', '^', '|'},
{'*', '/', '%'},
{'~'},
]
PREDICATE_OPERATORS = [
{'OR'},
{'AND'},
{'LIKE'},
{'==', '<', '>', '<=', '>=', '<>', '==', '!=', '!>', '!<'},
]
PREDICATE_EXPRESSION_OPERATORS = PREDICATE_OPERATORS + EXPRESSION_OPERATORS
def get_binary_precedence(operators, token):
if token:
tvalue = token.value
precedence = 0
for oplist in operators:
if tvalue in oplist:
return precedence
precedence += 1
return -1
def get_expr_identifiers(expression):
identifiers = set()
for key in expression.__slots__:
expr = getattr(expression, key)
if isinstance(expr, RqlIdentifier):
identifiers.add(expr)
else:
identifiers |= get_identifiers(expression)
return identifiers
class RqlFunctionCall(object):
__slots__ = ('name', 'args')
def __init__(self, name, args):
self.name = name
self.args = args
def __str__(self):
return '%s(%s)' % (self.name, self.args)
def __repr__(self):
return 'RqlFunctionCall(%r, %r)' % (self.name, self.args)
def is_constant(self, context ):
return False
def resolve(self, context):
# TODO: If args are const
return self
def evaluate(self, context):
func = context.functions.get(self.name)
if not func:
raise RqlSyntaxError("Unknown function '%s'" % self.name)
args = []
for arg in self.args:
args.append(arg.evaluate(context))
return func(*args)
class RqlIdentifier(object):
__slots__ = ('name')
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return 'RqlIdentifier(%r)' % (self.name)
def __cmp__(self, other):
return cmp(self.name, other)
def __hash__(self):
return hash(self.name)
def is_constant(self, context):
return False
def resolve(self, context):
return self
def evaluate(self, context):
return context.get_identifier(self.name)
class RqlBaseDataType(object):
__slots__ = ('value')
def __init__(self, value):
self.value = value
def __str__(self):
return '%s' % self.value
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.value)
def is_constant(self, context):
return True
def resolve(self, context):
return self
def evaluate(self, context):
return self.value
class RqlNumber(RqlBaseDataType):
def __init__(self, value):
super(RqlNumber, self).__init__(float(value))
class RqlBoolean(RqlBaseDataType):
def __init__(self, value):
super(RqlBoolean, self).__init__(bool(value))
class RqlString(RqlBaseDataType):
def __init__(self, value):
super(RqlString, self).__init__(value)
def __str__(self):
return '"%s"' % self.value
class RqlWildcard(RqlBaseDataType):
def __init__(self, value):
super(RqlWildcard, self).__init__(value)
class RqlUnary(object):
__slots__ = ('operator', 'expression')
def __init__(self, operator, expression):
self.operator = operator
self.expression = expression
def __str__(self):
return '%s %s' % (self.operator, self.expression)
def __repr__(self):
return 'RqlUnary(%r, %r)' % (self.operator, self.expression)
def is_constant(self, context):
return self.expression.is_constant(context)
def resolve(self, context):
self.expression = self.expression.resolve(context)
if self.is_constant(context):
return RqlNumber(self.evaluate(context))
return self
def evaluate(self, context):
expr = self.expression.evaluate(context)
if self.operator == '+': return expr
if self.operator == '-': return -expr
if self.operator == '~': return ~expr
if self.operator == 'NOT': return not expr
raise RqlSyntaxError("Unknown operator '%s'" % self.operator)
class RqlBinary(object):
__slots__ = ('operator', 'left', 'right')
def __init__(self, operator, left, right):
self.operator = operator
self.left = left
self.right = right
def __str__(self):
return '(%s %s %s)' % (self.left, self.operator, self.right)
def __repr__(self):
return 'RqlBinary(%r, %r, %r)' % (self.operator, self.left, self.right)
def is_constant(self, context):
return self.left.is_constant(context) and self.right.is_constant(context)
def resolve(self, context):
self.left = self.left.resolve(context)
self.right = self.right.resolve(context)
if self.is_constant(context):
result = self.evaluate(context)
if isinstance(result, basestring):
return RqlString(result)
if isinstance(result, (int, float)):
return RqlNumber(result)
if isinstance(result, bool):
return RqlBoolean(result)
raise RqlSyntaxError("Unexpected type %s %r" % (type(result), result))
return self
def evaluate(self, context):
left = self.left.evaluate(context)
right = self.right.evaluate(context)
# Expression
if self.operator == '+': return left + right
if self.operator == '-': return left - right
if self.operator == '&': return left & right
if self.operator == '|': return left | right
if self.operator == '^': return left ^ right
if self.operator == '*': return left * right
if self.operator == '/': return left / right
if self.operator == '%': return left % right
# Predicate
if self.operator == '=': return left == right
if self.operator == '<': return left < right
if self.operator == '>': return left > right
if self.operator == '<=': return left <= right
if self.operator == '>=': return left >= right
if self.operator == '==': return left == right
if self.operator == '!=': return left != right
if self.operator == 'IS': return left is right
if self.operator == 'OR': return left or right
if self.operator == 'AND': return left and right
# LIKE
raise RqlSyntaxError("Unknown operator '%s'" % self)
class RqlAssignment(object):
__slots__ = ('name', 'expression')
def __init__(self, name, expression):
self.name = name
self.expression = expression
def __repr__(self):
return 'RqlAssignment(%r, %r)' % (self.name, self.expression)
def is_constant(self, context):
return self.expression.is_constant(context)
def resolve(self):
self.expression = self.expression.resolve()
if self.is_constant(context):
return RqlNumber(self.evaluate(context))
return self
def evaluate(self, context):
right = self.expression.evaluate(context)
context.variables[self.name] = right
return right
class RqlParser(object):
__slots__ = ('lexer')
def __init__(self, lexer):
self.lexer = lexer
def parse_identifier(self):
items = self.parse_dot_list(self.lexer.expect_string_or_identifier)
return RqlIdentifier('.'.join(items))
# <List> ::= <Item> '.' <List> | <Item>
def parse_dot_list(self, item_parse_func):
return self.parse_list('.', item_parse_func)
# <List> ::= <Item> ',' <List> | <Item>
def parse_comma_list(self, item_parse_func):
return self.parse_list(',', item_parse_func)
# <List> ::= <Item> <separator> <List> | <Item>
def parse_list(self, separator, item_parse_func):
def_list = []
while True:
item_def = item_parse_func()
if not item_def:
raise Exception("Invalid list item definition")
def_list.append(item_def)
if not self.lexer.match_seq(separator):
break
return def_list
def _call_method(self, name, *args, **kwargs):
return getattr(self, name)(*args, **kwargs)
# =============================================================================
# Expression
# =============================================================================
class RqlExprParser(RqlParser):
# FunctionCall ::= Identifier '(' ')' ||
# Identifier '(' ArgumentList ')'
def parse_function_call(self):
args = []
name = self.lexer.expect_identifier().lower()
self.lexer.expect_seq('(')
if not self.lexer.peek_seq(')'):
args = self.parse_comma_list(self.parse_expression)
self.lexer.expect_seq(')')
return RqlFunctionCall(name, args)
# Primary ::= Identifier |
# Number |
# '(' Assignment ')' |
# FunctionCall
def parse_primary(self):
token = self.lexer.peek()
if not token:
raise RqlSyntaxError("Unexpected termination of expression")
if self.lexer.match_seq('('):
expr = self.parse_assignment()
self.lexer.expect_seq(')')
elif token.is_identifier():
if self.lexer.peek_seq(None, '('):
return self.parse_function_call()
expr = self.parse_identifier()
elif token.is_number():
token = self.lexer.next()
expr = RqlNumber(token.value)
elif token.is_string():
token = self.lexer.next()
expr = RqlString(token.value)
elif token.is_boolean():
token = self.lexer.next()
expr = RqlBoolean(token.value)
elif token.value == '*':
token = self.lexer.next()
expr = RqlWildcard(token.value)
else:
raise RqlSyntaxError("Parse error, can not process token %r" % token)
if self.lexer.match_seq('BETWEEN'):
return self.parse_between(expr)
elif self.lexer.match_seq('NOT', 'BETWEEN'):
return RqlUnary('NOT', self.parse_between(expr))
return expr
# Unary ::= Primary |
# '-' Unary |
# '~' Unary
def parse_unary(self):
operator = self.lexer.match_operator('-', '+', '~', 'NOT')
if operator:
expr = self.parse_unary()
return RqlUnary(operator, expr)
return self.parse_primary()
def parse_between(self, expr):
# expr >= min_expr AND expr <= max_expr
min_expr = self.parse_expression()
self.lexer.expect_seq('AND')
max_expr = self.parse_expression()
return RqlBinary('AND', RqlBinary('>=', expr, min_expr), RqlBinary('<=', expr, min_expr))
def parse_binary(self, operators, lhs, min_precedence):
while get_binary_precedence(operators, self.lexer.peek()) >= min_precedence:
operation = self.lexer.next()
rhs = self.parse_unary()
in_precedence = min_precedence
prec = get_binary_precedence(operators, self.lexer.peek())
while prec > in_precedence:
rhs = self.parse_binary(operators, rhs, prec)
in_precedence = prec
prec = get_binary_precedence(operators, self.lexer.peek())
lhs = RqlBinary(operation.value, lhs, rhs)
return lhs
def parse_expression(self):
return self.parse_binary(EXPRESSION_OPERATORS, self.parse_unary(), 0)
def parse_predicate(self):
return self.parse_binary(PREDICATE_EXPRESSION_OPERATORS, self.parse_unary(), 0)
# Assignment ::= Identifier '=' Assignment |
# Additive
def parse_assignment(self):
expr = self.parse_binary(PREDICATE_EXPRESSION_OPERATORS, self.parse_unary(), 0)
if isinstance(expr, RqlIdentifier):
if self.lexer.match_seq('='):
return RqlAssignment(expr, self.parse_assignment())
return expr
return expr
@staticmethod
def parse(expression):
lexer = RqlLexer(expression)
parser = RqlExprParser(lexer)
expr = parser.parse_assignment()
if lexer.has_next():
raise RqlSyntaxError("Unexpected token '%s'" % lexer.peek())
return expr
class RqlContext:
NULL = object()
def __init__(self):
self.constants = dict(DEFAULT_CONSTANTS)
self.functions = dict(DEFAULT_FUNCTIONS)
self.variables = {}
def get_identifier(self, name):
value = self.constants.get(name.upper(), self.NULL)
if value is not self.NULL: return value
value = self.variables.get(name.lower(), self.NULL)
if value is not self.NULL: return value
raise RqlSyntaxError("Unknown identifier '%s' - %r" % (name, self.variables))
def get_qualified_identifier(self, qualifier, name):
return self.get_identifier('%s.%s' % (qualifier, name))
def resolve(self, root):
return root.resolve(root)
def evaluate(self, root):
if 0 and __debug__:
print
print root
print self.resolve(root)
if not root:
raise RqlSyntaxError("Unknown syntax node")
return root.evaluate(self)
if __name__ == '__main__':
ctx = RqlContext()
ctx.variables['table.field'] = 20
print ctx.evaluate(RqlExprParser.parse("1"))
print ctx.evaluate(RqlExprParser.parse("1 + 2"))
print ctx.evaluate(RqlExprParser.parse("FALSE"))
print ctx.evaluate(RqlExprParser.parse("'foo'"))
print ctx.evaluate(RqlExprParser.parse("'foo' + \"-\" + 'bar'"))
print ctx.evaluate(RqlExprParser.parse("NOT 1 + NOT 0"))
print ctx.evaluate(RqlExprParser.parse("(NOT 1) + (NOT 0)"))
print ctx.evaluate(RqlExprParser.parse("1 + 2 * 3"))
print ctx.evaluate(RqlExprParser.parse("1 + (2 * 3)"))
print ctx.evaluate(RqlExprParser.parse("1 OR 2 AND 3"))
print ctx.evaluate(RqlExprParser.parse("1 OR (2 AND 3)"))
print ctx.evaluate(RqlExprParser.parse("sum(1, 2, 3) + 5 * 2 + min(5 / 10, 2 * -3)"))
print ctx.evaluate(RqlExprParser.parse("table.field + 1 + PI"))
print ctx.evaluate(RqlExprParser.parse("(15 + 2) != (- 14 + 3)"))
print ctx.evaluate(RqlExprParser.parse("(15 + 2) <= (14 + 3)"))
print ctx.evaluate(RqlExprParser.parse("(10 > 0 AND 20 < 2)"))
print ctx.evaluate(RqlExprParser.parse("(10 > 0 OR 20 < 2)"))
print RqlExprParser.parse("1 + 10 BETWEEN 1 AND 20 + 2")
print RqlExprParser.parse("1 + 10 NOT BETWEEN 1 AND 20 + 2")
print ctx.evaluate(RqlExprParser.parse("11 * 20 / 3"))
print ctx.evaluate(RqlExprParser.parse("11 * 21 / 2"))
| matteobertozzi/RaleighSL | src/raleigh-client/pyraleigh/sql/expr.py | Python | apache-2.0 | 14,678 |
"""Plugin common functions."""
import logging
import os
import re
import shutil
import tempfile
import OpenSSL
import pkg_resources
import zope.interface
from acme.jose import util as jose_util
from certbot import constants
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot import reverter
from certbot import util
logger = logging.getLogger(__name__)
def option_namespace(name):
"""ArgumentParser options namespace (prefix of all options)."""
return name + "-"
def dest_namespace(name):
"""ArgumentParser dest namespace (prefix of all destinations)."""
return name.replace("-", "_") + "_"
private_ips_regex = re.compile(
r"(^127\.0\.0\.1)|(^10\.)|(^172\.1[6-9]\.)|"
r"(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^192\.168\.)")
hostname_regex = re.compile(
r"^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*[a-z]+$", re.IGNORECASE)
@zope.interface.implementer(interfaces.IPlugin)
class Plugin(object):
"""Generic plugin."""
# provider is not inherited, subclasses must define it on their own
# @zope.interface.provider(interfaces.IPluginFactory)
def __init__(self, config, name):
self.config = config
self.name = name
@jose_util.abstractclassmethod
def add_parser_arguments(cls, add):
"""Add plugin arguments to the CLI argument parser.
NOTE: If some of your flags interact with others, you can
use cli.report_config_interaction to register this to ensure
values are correctly saved/overridable during renewal.
:param callable add: Function that proxies calls to
`argparse.ArgumentParser.add_argument` prepending options
with unique plugin name prefix.
"""
@classmethod
def inject_parser_options(cls, parser, name):
"""Inject parser options.
See `~.IPlugin.inject_parser_options` for docs.
"""
# dummy function, doesn't check if dest.startswith(self.dest_namespace)
def add(arg_name_no_prefix, *args, **kwargs):
# pylint: disable=missing-docstring
return parser.add_argument(
"--{0}{1}".format(option_namespace(name), arg_name_no_prefix),
*args, **kwargs)
return cls.add_parser_arguments(add)
@property
def option_namespace(self):
"""ArgumentParser options namespace (prefix of all options)."""
return option_namespace(self.name)
def option_name(self, name):
"""Option name (include plugin namespace)."""
return self.option_namespace + name
@property
def dest_namespace(self):
"""ArgumentParser dest namespace (prefix of all destinations)."""
return dest_namespace(self.name)
def dest(self, var):
"""Find a destination for given variable ``var``."""
# this should do exactly the same what ArgumentParser(arg),
# does to "arg" to compute "dest"
return self.dest_namespace + var.replace("-", "_")
def conf(self, var):
"""Find a configuration value for variable ``var``."""
return getattr(self.config, self.dest(var))
# other
class Installer(Plugin):
"""An installer base class with reverter and ssl_dhparam methods defined.
Installer plugins do not have to inherit from this class.
"""
def __init__(self, *args, **kwargs):
super(Installer, self).__init__(*args, **kwargs)
self.reverter = reverter.Reverter(self.config)
def add_to_checkpoint(self, save_files, save_notes, temporary=False):
"""Add files to a checkpoint.
:param set save_files: set of filepaths to save
:param str save_notes: notes about changes during the save
:param bool temporary: True if the files should be added to a
temporary checkpoint rather than a permanent one. This is
usually used for changes that will soon be reverted.
:raises .errors.PluginError: when unable to add to checkpoint
"""
if temporary:
checkpoint_func = self.reverter.add_to_temp_checkpoint
else:
checkpoint_func = self.reverter.add_to_checkpoint
try:
checkpoint_func(save_files, save_notes)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def finalize_checkpoint(self, title):
"""Timestamp and save changes made through the reverter.
:param str title: Title describing checkpoint
:raises .errors.PluginError: when an error occurs
"""
try:
self.reverter.finalize_checkpoint(title)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def recovery_routine(self):
"""Revert all previously modified files.
Reverts all modified files that have not been saved as a checkpoint
:raises .errors.PluginError: If unable to recover the configuration
"""
try:
self.reverter.recovery_routine()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def revert_temporary_config(self):
"""Rollback temporary checkpoint.
:raises .errors.PluginError: when unable to revert config
"""
try:
self.reverter.revert_temporary_config()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def rollback_checkpoints(self, rollback=1):
"""Rollback saved checkpoints.
:param int rollback: Number of checkpoints to revert
:raises .errors.PluginError: If there is a problem with the input or
the function is unable to correctly revert the configuration
"""
try:
self.reverter.rollback_checkpoints(rollback)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def view_config_changes(self):
"""Show all of the configuration changes that have taken place.
:raises .errors.PluginError: If there is a problem while processing
the checkpoints directories.
"""
try:
self.reverter.view_config_changes()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
@property
def ssl_dhparams(self):
"""Full absolute path to ssl_dhparams file."""
return os.path.join(self.config.config_dir, constants.SSL_DHPARAMS_DEST)
@property
def updated_ssl_dhparams_digest(self):
"""Full absolute path to digest of updated ssl_dhparams file."""
return os.path.join(self.config.config_dir, constants.UPDATED_SSL_DHPARAMS_DIGEST)
def install_ssl_dhparams(self):
"""Copy Certbot's ssl_dhparams file into the system's config dir if required."""
return install_version_controlled_file(
self.ssl_dhparams,
self.updated_ssl_dhparams_digest,
constants.SSL_DHPARAMS_SRC,
constants.ALL_SSL_DHPARAMS_HASHES)
class Addr(object):
r"""Represents an virtual host address.
:param str addr: addr part of vhost address
:param str port: port number or \*, or ""
"""
def __init__(self, tup, ipv6=False):
self.tup = tup
self.ipv6 = ipv6
@classmethod
def fromstring(cls, str_addr):
"""Initialize Addr from string."""
if str_addr.startswith('['):
# ipv6 addresses starts with [
endIndex = str_addr.rfind(']')
host = str_addr[:endIndex + 1]
port = ''
if len(str_addr) > endIndex + 2 and str_addr[endIndex + 1] == ':':
port = str_addr[endIndex + 2:]
return cls((host, port), ipv6=True)
else:
tup = str_addr.partition(':')
return cls((tup[0], tup[2]))
def __str__(self):
if self.tup[1]:
return "%s:%s" % self.tup
return self.tup[0]
def normalized_tuple(self):
"""Normalized representation of addr/port tuple
"""
if self.ipv6:
return (self.get_ipv6_exploded(), self.tup[1])
return self.tup
def __eq__(self, other):
if isinstance(other, self.__class__):
# compare normalized to take different
# styles of representation into account
return self.normalized_tuple() == other.normalized_tuple()
return False
def __hash__(self):
return hash(self.tup)
def get_addr(self):
"""Return addr part of Addr object."""
return self.tup[0]
def get_port(self):
"""Return port."""
return self.tup[1]
def get_addr_obj(self, port):
"""Return new address object with same addr and new port."""
return self.__class__((self.tup[0], port), self.ipv6)
def _normalize_ipv6(self, addr):
"""Return IPv6 address in normalized form, helper function"""
addr = addr.lstrip("[")
addr = addr.rstrip("]")
return self._explode_ipv6(addr)
def get_ipv6_exploded(self):
"""Return IPv6 in normalized form"""
if self.ipv6:
return ":".join(self._normalize_ipv6(self.tup[0]))
return ""
def _explode_ipv6(self, addr):
"""Explode IPv6 address for comparison"""
result = ['0', '0', '0', '0', '0', '0', '0', '0']
addr_list = addr.split(":")
if len(addr_list) > len(result):
# too long, truncate
addr_list = addr_list[0:len(result)]
append_to_end = False
for i in range(0, len(addr_list)):
block = addr_list[i]
if len(block) == 0:
# encountered ::, so rest of the blocks should be
# appended to the end
append_to_end = True
continue
elif len(block) > 1:
# remove leading zeros
block = block.lstrip("0")
if not append_to_end:
result[i] = str(block)
else:
# count the location from the end using negative indices
result[i-len(addr_list)] = str(block)
return result
class TLSSNI01(object):
"""Abstract base for TLS-SNI-01 challenge performers"""
def __init__(self, configurator):
self.configurator = configurator
self.achalls = []
self.indices = []
self.challenge_conf = os.path.join(
configurator.config.config_dir, "le_tls_sni_01_cert_challenge.conf")
# self.completed = 0
def add_chall(self, achall, idx=None):
"""Add challenge to TLSSNI01 object to perform at once.
:param .KeyAuthorizationAnnotatedChallenge achall: Annotated
TLSSNI01 challenge.
:param int idx: index to challenge in a larger array
"""
self.achalls.append(achall)
if idx is not None:
self.indices.append(idx)
def get_cert_path(self, achall):
"""Returns standardized name for challenge certificate.
:param .KeyAuthorizationAnnotatedChallenge achall: Annotated
tls-sni-01 challenge.
:returns: certificate file name
:rtype: str
"""
return os.path.join(self.configurator.config.work_dir,
achall.chall.encode("token") + ".crt")
def get_key_path(self, achall):
"""Get standardized path to challenge key."""
return os.path.join(self.configurator.config.work_dir,
achall.chall.encode("token") + '.pem')
def get_z_domain(self, achall):
"""Returns z_domain (SNI) name for the challenge."""
return achall.response(achall.account_key).z_domain.decode("utf-8")
def _setup_challenge_cert(self, achall, cert_key=None):
"""Generate and write out challenge certificate."""
cert_path = self.get_cert_path(achall)
key_path = self.get_key_path(achall)
# Register the path before you write out the file
self.configurator.reverter.register_file_creation(True, key_path)
self.configurator.reverter.register_file_creation(True, cert_path)
response, (cert, key) = achall.response_and_validation(
cert_key=cert_key)
cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert)
key_pem = OpenSSL.crypto.dump_privatekey(
OpenSSL.crypto.FILETYPE_PEM, key)
# Write out challenge cert and key
with open(cert_path, "wb") as cert_chall_fd:
cert_chall_fd.write(cert_pem)
with util.safe_open(key_path, 'wb', chmod=0o400) as key_file:
key_file.write(key_pem)
return response
def install_version_controlled_file(dest_path, digest_path, src_path, all_hashes):
"""Copy a file into an active location (likely the system's config dir) if required.
:param str dest_path: destination path for version controlled file
:param str digest_path: path to save a digest of the file in
:param str src_path: path to version controlled file found in distribution
:param list all_hashes: hashes of every released version of the file
"""
current_hash = crypto_util.sha256sum(src_path)
def _write_current_hash():
with open(digest_path, "w") as f:
f.write(current_hash)
def _install_current_file():
shutil.copyfile(src_path, dest_path)
_write_current_hash()
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(dest_path):
_install_current_file()
return
# there's already a file there. if it's up to date, do nothing. if it's not but
# it matches a known file hash, we can update it.
# otherwise, print a warning once per new version.
active_file_digest = crypto_util.sha256sum(dest_path)
if active_file_digest == current_hash: # already up to date
return
elif active_file_digest in all_hashes: # safe to update
_install_current_file()
else: # has been manually modified, not safe to update
# did they modify the current version or an old version?
if os.path.isfile(digest_path):
with open(digest_path, "r") as f:
saved_digest = f.read()
# they modified it after we either installed or told them about this version, so return
if saved_digest == current_hash:
return
# there's a new version but we couldn't update the file, or they deleted the digest.
# save the current digest so we only print this once, and print a warning
_write_current_hash()
logger.warning("%s has been manually modified; updated file "
"saved to %s. We recommend updating %s for security purposes.",
dest_path, src_path, dest_path)
# test utils used by certbot_apache/certbot_nginx (hence
# "pragma: no cover") TODO: this might quickly lead to dead code (also
# c.f. #383)
def dir_setup(test_dir, pkg): # pragma: no cover
"""Setup the directories necessary for the configurator."""
def expanded_tempdir(prefix):
"""Return the real path of a temp directory with the specified prefix
Some plugins rely on real paths of symlinks for working correctly. For
example, certbot-apache uses real paths of configuration files to tell
a virtual host from another. On systems where TMP itself is a symbolic
link, (ex: OS X) such plugins will be confused. This function prevents
such a case.
"""
return os.path.realpath(tempfile.mkdtemp(prefix))
temp_dir = expanded_tempdir("temp")
config_dir = expanded_tempdir("config")
work_dir = expanded_tempdir("work")
os.chmod(temp_dir, constants.CONFIG_DIRS_MODE)
os.chmod(config_dir, constants.CONFIG_DIRS_MODE)
os.chmod(work_dir, constants.CONFIG_DIRS_MODE)
test_configs = pkg_resources.resource_filename(
pkg, os.path.join("testdata", test_dir))
shutil.copytree(
test_configs, os.path.join(temp_dir, test_dir), symlinks=True)
return temp_dir, config_dir, work_dir
| jsha/letsencrypt | certbot/plugins/common.py | Python | apache-2.0 | 16,182 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import traceback
from restlib import response
from mint import logerror
from mint import mint_error
from mint.rest.api import models
from mint.rest.modellib import converter
log = logging.getLogger(__name__)
class ErrorCallback(object):
def __init__(self, controller):
self.controller = controller
def processException(self, request, excClass, exception, tb):
message = '%s: %s' % (excClass.__name__, exception)
if hasattr(exception, 'status'):
status = exception.status
else:
status = 500
self.logError(request, excClass, exception, tb, doEmail=True)
# Only send the traceback information if it's an unintentional
# exception (i.e. a 500)
if status == 500:
tbString = 'Traceback:\n' + ''.join(traceback.format_tb(tb))
text = [message + '\n', tbString]
else:
tbString = None
text = [message + '\n']
isFlash = 'HTTP_X_FLASH_VERSION' in request.headers or 'X-Wrap-Response-Codes' in request.headers
if not getattr(request, 'contentType', None):
request.contentType = 'text/xml'
request.responseType = 'xml'
if isFlash or request.contentType != 'text/plain':
# for text/plain, just print out the traceback in the easiest to read
# format.
code = status
if isFlash:
# flash ignores all data sent with a non-200 error
status = 200
error = models.Fault(code=code, message=message,
traceback=tbString)
text = converter.toText(request.responseType, error,
self.controller, request)
return response.Response(text, content_type=request.contentType,
status=status)
def logError(self, request, e_type, e_value, e_tb, doEmail=True):
info = {
'uri' : request.thisUrl,
'path' : request.path,
'method' : request.method,
'headers_in' : request.headers,
'request_params' : request.GET,
'post_params' : request.POST,
'remote' : request.remote,
}
try:
logerror.logErrorAndEmail(self.controller.cfg, e_type, e_value,
e_tb, 'API call', info, doEmail=doEmail)
except mint_error.MailError, err:
log.error("Error sending mail: %s", str(err))
| sassoftware/mint | mint/rest/middleware/error.py | Python | apache-2.0 | 3,212 |
#!/usr/local/bin/python3
# Find the number of elements of a list
def lenOf(mylist):
return (len(mylist))
print (lenOf("Hello"))
print (lenOf(""))
print (lenOf([123,123,123]))
| mocovenwitch/haskell-learning-notes | python/problem04.py | Python | apache-2.0 | 181 |
#!/usr/bin/python
#credits : https://gist.github.com/TheCrazyT/11263599
import socket
import ssl
import select
import time
import re
import sys
from thread import start_new_thread
from struct import pack
from random import randint
from subprocess import call
import os
import fnmatch
import argparse
import logging
class lakkucast:
def __init__(self):
self.status = None
self.session_id = None
self.protocolVersion = 0
self.source_id = "sender-0"
self.destination_id = "receiver-0"
self.chromecast_server = "192.168.1.23" #living room audio
self.socket = 0
self.type_enum = 0
self.type_string = 2
self.type_bytes = self.type_string
self.session = 0
self.play_state = None
self.sleep_between_media = 5
self.content_id = None
self.socket_fail_count = 100
def clean(self,s):
return re.sub(r'[\x00-\x1F\x7F]', '?',s)
def getType(self, fieldId,t):
return (fieldId << 3) | t
def getLenOf(self, s):
x = ""
l = len(s)
while(l > 0x7F):
x += pack("B",l & 0x7F | 0x80)
l >>= 7
x += pack("B",l & 0x7F)
return x
def init_status(self):
self.socket = socket.socket()
self.socket = ssl.wrap_socket(self.socket)
#print "connecting ..."
self.socket.connect((self.chromecast_server,8009))
payloadType = 0 #0=string
data = "{\"type\":\"CONNECT\",\"origin\":{}}"
lnData = self.getLenOf(data)
#print len(lnData),len(data),lnData.encode("hex")
namespace = "urn:x-cast:com.google.cast.tp.connection"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "Connecting ..."
self.socket.write(msg)
payloadType = 0 #0=string
data = "{\"type\":\"GET_STATUS\",\"requestId\":46479000}"
lnData = self.getLenOf(data)
namespace = "urn:x-cast:com.google.cast.receiver"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" % (len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print "sending status request..."
self.socket.write(msg)
m1=None
m3=None
result=""
count = 0
while m1==None and m3==None:
lastresult = self.socket.read(2048)
result += lastresult
#print "#"+lastresult.encode("hex")
#if lastresult != "":
# print self.clean("\nH!"+lastresult)
#print result
m1 = re.search('"sessionId":"(?P<session>[^"]+)"', result)
m2 = re.search('"statusText":"(?P<status>[^"]+)"', result)
m3 = re.search('"playerState":"(?P<play_state>[^"]+)"', result)
m4 = re.search('"contentId":"(?P<content_id>[^"]+)"', result)
count = count + 1
if count > self.socket_fail_count:
self.status = None
self.play_state = None
self.status = None
break
#print "#%i" % (m==None)
if m1 != None:
#print "session:",m1.group("session")
self.session = m1.group("session")
if m2 != None:
#print "status:",m2.group("status")
self.status = m2.group("status")
if m3 != None:
#print "play_state:",m3.group("play_state")
self.play_state = m3.group("play_state")
if m4 != None:
#print "contentid:",m4.group("content_id")
self.content_id = m4.group("content_id")
payloadType = 0 #0=string
data = "{MESSAGE_TYPE: 'SET_VOLUME','volume': {'level': 0.2}}"
lnData = self.getLenOf(data)
#print len(lnData),len(data),lnData.encode("hex")
namespace = "urn:x-cast:com.google.cast.tp.connection"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "Connecting ..."
self.socket.write(msg)
def get_status(self):
return " ".join(["main_status:" , self.get_main_status() , "play_status:" , self.get_play_status()])
def get_main_status(self):
if self.status == None:
status_str = "None"
else:
status_str = self.status
return (status_str)
def get_play_status(self):
if self.play_state == None:
play_state_str = "None"
else:
play_state_str = self.play_state
return (play_state_str)
def ready_to_play(self):
if self.status == "Now Casting":
return False
if self.status == "Ready To Cast" or self.status == None or self.status == "Chromecast Home Screen":
if self.play_state == None:
return True
if self.play_state == "IDLE":
return True
if self.play_state == "PLAYING":
return False
if self.play_state == "BUFFERING":
return False
return True
else:
return False
def close_connection(self):
self.socket.close()
def play_url(self, url):
payloadType = 0 #0=string
data = "{\"type\":\"LAUNCH\",\"requestId\":46479001,\"appId\":\"CC1AD845\"}"
lnData = self.getLenOf(data)
namespace = "urn:x-cast:com.google.cast.receiver"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "sending ..."
self.socket.write(msg)
m=None
result=""
while m==None:
lastresult = self.socket.read(2048)
result += lastresult
#print "#"+lastresult.encode("hex")
#print clean("!"+lastresult)
m = re.search('"transportId":"(?P<transportId>[^"]+)"', result)
self.destination_id = m.group("transportId")
payloadType = 0 #0=string
data = "{\"type\":\"CONNECT\",\"origin\":{}}"
lnData = self.getLenOf(data)
#print len(lnData),len(data),lnData.encode("hex")
namespace = "urn:x-cast:com.google.cast.tp.connection"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "sending ..."
self.socket.write(msg)
payloadType = 0 #0=string
data = "{\"type\":\"LOAD\",\"requestId\":46479002,\"sessionId\":\""+self.session+"\",\"media\":{\"contentId\":\""+url+"\",\"streamType\":\"buffered\",\"contentType\":\"video/mp4\"},\"autoplay\":true,\"currentTime\":0,\"customData\":{\"payload\":{\"title:\":\"\"}}}"
lnData = self.getLenOf(data)
namespace = "urn:x-cast:com.google.cast.media"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "sending ..."
#print "LOADING"
self.socket.write(msg)
payloadType = 0 #0=string
volume = min(max(0, round(0.1, 1)), 1)
data = "{MESSAGE_TYPE: 'SET_VOLUME','volume': {'level': volume}}"
lnData = self.getLenOf(data)
#print len(lnData),len(data),lnData.encode("hex")
namespace = "urn:x-cast:com.google.cast.tp.connection"
msg = pack(">BBBB%dsBB%dsBB%dsBBB%ds%ds" %
(len(self.source_id),
len(self.destination_id),
len(namespace),
len(lnData),
len(data)),
self.getType(1,self.type_enum),
self.protocolVersion,
self.getType(2,self.type_string),
len(self.source_id),
self.source_id,
self.getType(3,self.type_string),
len(self.destination_id),
self.destination_id,
self.getType(4,self.type_string),
len(namespace),
namespace,
self.getType(5,self.type_enum),
payloadType,
self.getType(6,self.type_bytes),
lnData,
data)
msg = pack(">I%ds" % (len(msg)),len(msg),msg)
#print msg.encode("hex")
#print "Connecting ..."
self.socket.write(msg)
self.close_connection()
#try:
# while True:
# print "before lastresult"
# lastresult = self.socket.read(2048)
# if lastresult!="":
# #print "#"+lastresult.encode("hex")
# print self.clean("! In loop:"+lastresult)
# finally:
# print "final"
# socket.close()
# print "socket closed"
class manage_lightwave:
def __init__(self):
self.room = "Main\ Bedroom"
self.device = "Screen"
self.lightwave_cmd = "/usr/local/bin/lightwaverf"
def start_screen(self):
cmd = " ".join([self.lightwave_cmd, self.room, self.device, "on", ">cmd.log", "2>&1"])
os.system(cmd)
return(cmd)
def stop_screen(self):
cmd = " ".join([self.lightwave_cmd, self.room, self.device, "off", ">cmd.log", "2>&1"])
os.system(cmd)
return(cmd)
class lakkucast_media:
def __init__(self):
self.top_dir = "/data"
self.top_url = "http://192.168.1.98"
#self.media_dirs = ["media/test/sample1", "media/test/sample2"]
self.media_dirs = ["media/TV-Shows/English/Friends", "media/TV-Shows/English/That 70s Show", "media/TV-Shows/English/Big Bang Theory"]
self.media_data = "/data/webapps/lakku/lakkucast/media.dat"
def random_play(self, num_play):
count_dir = 0
num_dirs = len(self.media_dirs)
while count_dir < num_dirs:
rand_main = randint(0, (len(self.media_dirs)-1))
url_list = []
sel_dir = os.path.join(self.top_dir, self.media_dirs[rand_main])
if os.path.isdir(sel_dir):
count_dir = count_dir + 1
matches = []
for root, dirnames, filenames in os.walk(sel_dir):
for filename in fnmatch.filter(filenames, '*.mp4'):
matches.append(os.path.join(root, filename).replace(self.top_dir,''))
count = 1
loop_count = 1
while count <= num_play:
file_rand = randint(0, (len(matches)-1))
file_name = "".join([self.top_url , matches[file_rand]])
if self.played_before(file_name) == False:
if file_name not in url_list:
url_list.append(file_name)
count = count + 1
loop_count = loop_count + 1
if loop_count == (len(matches)-1):
break
if count < num_play:
continue
else:
fhand = open(self.media_data, 'a+')
for u in url_list:
fhand.write(u+'\n')
fhand.close()
return url_list
def played_before(self, media_name):
if media_name in open(self.media_data).read():
return True
return False
def reset_media_history(self):
fhand = open(self.media_data, 'w')
fhand.truncate()
fhand.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="lakkucast")
parser.add_argument("--play", help="Play x videos ")
parser.add_argument("--stop", help="Stop playing and shutdown", action='store_true')
parser.add_argument("--reset", help="Stop playing", action='store_true')
parser.add_argument("--reset_media_history", help="Reset media history", action='store_true')
args = parser.parse_args()
log_file = "/data/webapps/lakku/lakkucast/lakkucast.log"
log_level = logging.INFO
logging.basicConfig(filename=log_file, level=log_level,
format='%(asctime)s [%(levelname)s] %(message)s')
logging.info("Starting lakkucast.")
if args.play:
num_play = int(args.play) * 2
logging.info("Play count: %s"
% (args.play))
lm = lakkucast_media()
lwrf = manage_lightwave()
logging.info("Sending start command to lwrf")
logging.info(lwrf.start_screen())
lwrf.start_screen()
logging.info("Sleeping after lwrf start")
url_list = lm.random_play(num_play)
time.sleep(20)
if url_list != None:
logging.info("Got %d urls to play"
% (len(url_list)))
for u in url_list:
logging.info("Trying URL: %s"
% (u))
l = lakkucast()
logging.info("Sleeping before main init")
time.sleep(l.sleep_between_media)
l.init_status()
logging.info(l.get_status())
if l.ready_to_play():
logging.info("Playing URL: %s"
% (u))
l.play_url(u)
l.init_status()
logging.info(l.get_status())
while not l.ready_to_play():
time.sleep(l.sleep_between_media)
l.init_status()
logging.info(l.get_status())
time.sleep(l.sleep_between_media)
logging.info("Sending stop command to lwrf")
logging.info(lwrf.stop_screen())
else:
logging.info("No urls returned by player")
l.play_url("http://192.168.1.98/media/test/water.mp4")
time.sleep(l.sleep_between_media)
lwrf = manage_lightwave()
logging.info("Sending stop command to lwrf")
logging.info(lwrf.stop_screen())
if args.stop:
l = lakkucast()
l.init_status()
logging.info("Calling stop")
logging.info(l.get_status())
l.play_url("http://192.168.1.98/media/test/water.mp4")
time.sleep(10)
lwrf = manage_lightwave()
logging.info("Sending stop command to lwrf")
logging.info(lwrf.stop_screen())
if args.reset:
l = lakkucast()
l.init_status()
logging.info("Calling reset")
logging.info(l.get_status())
l.play_url("http://192.168.1.98/media/test/water.mp4")
if args.reset_media_history:
logging.info("Calling Reset media history")
lm = lakkucast_media()
lm.reset_media_history()
| srirajan/lakkucast | lakkucast.py | Python | apache-2.0 | 21,535 |
import os
import sys
def test(arg):
return os.system('bin/nosetests -s -d -v %s' % arg)
def main(args):
if not args:
print("Run as bin/python run_failure.py <test>, for example: \n"
"bin/python run_failure.py "
"kazoo.tests.test_watchers:KazooChildrenWatcherTests")
return
arg = args[0]
i = 0
while 1:
i += 1
print('Run number: %s' % i)
ret = test(arg)
if ret != 0:
break
if __name__ == '__main__':
main(sys.argv[1:])
| bsanders/kazoo | run_failure.py | Python | apache-2.0 | 536 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library containing Tokenizer definitions.
The RougeScorer class can be instantiated with the tokenizers defined here. New
tokenizers can be defined by creating a subclass of the Tokenizer abstract class
and overriding the tokenize() method.
"""
import abc
from nltk.stem import porter
from rouge import tokenize
class Tokenizer(abc.ABC):
"""Abstract base class for a tokenizer.
Subclasses of Tokenizer must implement the tokenize() method.
"""
@abc.abstractmethod
def tokenize(self, text):
raise NotImplementedError("Tokenizer must override tokenize() method")
class DefaultTokenizer(Tokenizer):
"""Default tokenizer which tokenizes on whitespace."""
def __init__(self, use_stemmer=False):
"""Constructor for DefaultTokenizer.
Args:
use_stemmer: boolean, indicating whether Porter stemmer should be used to
strip word suffixes to improve matching.
"""
self._stemmer = porter.PorterStemmer() if use_stemmer else None
def tokenize(self, text):
return tokenize.tokenize(text, self._stemmer)
| google-research/google-research | rouge/tokenizers.py | Python | apache-2.0 | 1,661 |
# Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface for shares extension."""
try:
from urllib import urlencode # noqa
except ImportError:
from urllib.parse import urlencode # noqa
from manilaclient import api_versions
from manilaclient import base
from manilaclient.common import constants
from manilaclient.openstack.common.apiclient import base as common_base
class ShareSnapshot(common_base.Resource):
"""Represent a snapshot of a share."""
def __repr__(self):
return "<ShareSnapshot: %s>" % self.id
def update(self, **kwargs):
"""Update this snapshot."""
self.manager.update(self, **kwargs)
def reset_state(self, state):
"""Update the snapshot with the privided state."""
self.manager.reset_state(self, state)
def delete(self):
"""Delete this snapshot."""
self.manager.delete(self)
def force_delete(self):
"""Delete the specified snapshot ignoring its current state."""
self.manager.force_delete(self)
class ShareSnapshotManager(base.ManagerWithFind):
"""Manage :class:`ShareSnapshot` resources."""
resource_class = ShareSnapshot
def create(self, share, force=False, name=None, description=None):
"""Create a snapshot of the given share.
:param share_id: The ID of the share to snapshot.
:param force: If force is True, create a snapshot even if the
share is busy. Default is False.
:param name: Name of the snapshot
:param description: Description of the snapshot
:rtype: :class:`ShareSnapshot`
"""
body = {'snapshot': {'share_id': common_base.getid(share),
'force': force,
'name': name,
'description': description}}
return self._create('/snapshots', body, 'snapshot')
def get(self, snapshot):
"""Get a snapshot.
:param snapshot: The :class:`ShareSnapshot` instance or string with ID
of snapshot to delete.
:rtype: :class:`ShareSnapshot`
"""
snapshot_id = common_base.getid(snapshot)
return self._get('/snapshots/%s' % snapshot_id, 'snapshot')
def list(self, detailed=True, search_opts=None, sort_key=None,
sort_dir=None):
"""Get a list of snapshots of shares.
:param search_opts: Search options to filter out shares.
:param sort_key: Key to be sorted.
:param sort_dir: Sort direction, should be 'desc' or 'asc'.
:rtype: list of :class:`ShareSnapshot`
"""
if search_opts is None:
search_opts = {}
if sort_key is not None:
if sort_key in constants.SNAPSHOT_SORT_KEY_VALUES:
search_opts['sort_key'] = sort_key
else:
raise ValueError(
'sort_key must be one of the following: %s.'
% ', '.join(constants.SNAPSHOT_SORT_KEY_VALUES))
if sort_dir is not None:
if sort_dir in constants.SORT_DIR_VALUES:
search_opts['sort_dir'] = sort_dir
else:
raise ValueError(
'sort_dir must be one of the following: %s.'
% ', '.join(constants.SORT_DIR_VALUES))
if search_opts:
query_string = urlencode(
sorted([(k, v) for (k, v) in list(search_opts.items()) if v]))
if query_string:
query_string = "?%s" % (query_string,)
else:
query_string = ''
if detailed:
path = "/snapshots/detail%s" % (query_string,)
else:
path = "/snapshots%s" % (query_string,)
return self._list(path, 'snapshots')
def delete(self, snapshot):
"""Delete a snapshot of a share.
:param snapshot: The :class:`ShareSnapshot` to delete.
"""
self._delete("/snapshots/%s" % common_base.getid(snapshot))
def _do_force_delete(self, snapshot, action_name="force_delete"):
"""Delete the specified snapshot ignoring its current state."""
return self._action(action_name, common_base.getid(snapshot))
@api_versions.wraps("1.0", "2.6")
def force_delete(self, snapshot):
return self._do_force_delete(snapshot, "os-force_delete")
@api_versions.wraps("2.7") # noqa
def force_delete(self, snapshot):
return self._do_force_delete(snapshot, "force_delete")
def update(self, snapshot, **kwargs):
"""Update a snapshot.
:param snapshot: The :class:`ShareSnapshot` instance or string with ID
of snapshot to delete.
:rtype: :class:`ShareSnapshot`
"""
if not kwargs:
return
body = {'snapshot': kwargs, }
snapshot_id = common_base.getid(snapshot)
return self._update("/snapshots/%s" % snapshot_id, body)
def _do_reset_state(self, snapshot, state, action_name="reset_status"):
"""Update the specified share snapshot with the provided state."""
return self._action(action_name, snapshot, {"status": state})
@api_versions.wraps("1.0", "2.6")
def reset_state(self, snapshot, state):
return self._do_reset_state(snapshot, state, "os-reset_status")
@api_versions.wraps("2.7") # noqa
def reset_state(self, snapshot, state):
return self._do_reset_state(snapshot, state, "reset_status")
def _action(self, action, snapshot, info=None, **kwargs):
"""Perform a snapshot 'action'."""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/snapshots/%s/action' % common_base.getid(snapshot)
return self.api.client.post(url, body=body)
| sniperganso/python-manilaclient | manilaclient/v2/share_snapshots.py | Python | apache-2.0 | 6,363 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.target_types import DockerImage
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
DockerVersionContextValue,
)
from pants.backend.docker.util_rules.docker_build_context import rules as context_rules
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*context_rules(),
*core_target_types_rules(),
*package_pex_binary.rules(),
*parser_rules(),
*pex_from_targets.rules(),
*target_types_rules.rules(),
QueryRule(BuiltPackage, [PexBinaryFieldSet]),
QueryRule(DockerBuildContext, (DockerBuildContextRequest,)),
],
target_types=[DockerImage, FilesGeneratorTarget, PexBinary],
)
rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
def assert_build_context(
rule_runner: RuleRunner,
address: Address,
expected_files: list[str],
expected_version_context: FrozenDict[str, DockerVersionContextValue] | None = None,
) -> None:
context = rule_runner.request(
DockerBuildContext,
[
DockerBuildContextRequest(
address=address,
build_upstream_images=False,
)
],
)
snapshot = rule_runner.request(Snapshot, [context.digest])
assert sorted(expected_files) == sorted(snapshot.files)
if expected_version_context is not None:
assert expected_version_context == context.version_context
def test_file_dependencies(rule_runner: RuleRunner) -> None:
# img_A -> files_A
# img_A -> img_B -> files_B
rule_runner.add_to_build_file(
"src/a",
dedent(
"""\
docker_image(name="img_A", dependencies=[":files_A", "src/b:img_B"])
files(name="files_A", sources=["files/**"])
"""
),
)
rule_runner.add_to_build_file(
"src/b",
dedent(
"""\
docker_image(name="img_B", dependencies=[":files_B"])
files(name="files_B", sources=["files/**"])
"""
),
)
rule_runner.create_files("src/a", ["Dockerfile"])
rule_runner.create_files("src/a/files", ["a01", "a02"])
rule_runner.create_files("src/b", ["Dockerfile"])
rule_runner.create_files("src/b/files", ["b01", "b02"])
# We want files_B in build context for img_B
assert_build_context(
rule_runner,
Address("src/b", target_name="img_B"),
expected_files=["src/b/Dockerfile", "src/b/files/b01", "src/b/files/b02"],
)
# We want files_A in build context for img_A, but not files_B
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=["src/a/Dockerfile", "src/a/files/a01", "src/a/files/a02"],
)
# Mixed.
rule_runner.add_to_build_file(
"src/c",
dedent(
"""\
docker_image(name="img_C", dependencies=["src/a:files_A", "src/b:files_B"])
"""
),
)
rule_runner.create_files("src/c", ["Dockerfile"])
assert_build_context(
rule_runner,
Address("src/c", target_name="img_C"),
expected_files=[
"src/c/Dockerfile",
"src/a/files/a01",
"src/a/files/a02",
"src/b/files/b01",
"src/b/files/b02",
],
)
def test_files_out_of_tree(rule_runner: RuleRunner) -> None:
# src/a:img_A -> res/static:files
rule_runner.add_to_build_file(
"src/a",
dedent(
"""\
docker_image(name="img_A", dependencies=["res/static:files"])
"""
),
)
rule_runner.add_to_build_file(
"res/static",
dedent(
"""\
files(name="files", sources=["!BUILD", "**/*"])
"""
),
)
rule_runner.create_files("src/a", ["Dockerfile"])
rule_runner.create_files("res/static", ["s01", "s02"])
rule_runner.create_files("res/static/sub", ["s03"])
assert_build_context(
rule_runner,
Address("src/a", target_name="img_A"),
expected_files=[
"src/a/Dockerfile",
"res/static/s01",
"res/static/s02",
"res/static/sub/s03",
],
)
def test_packaged_pex_path(rule_runner: RuleRunner) -> None:
# This test is here to ensure that we catch if there is any change in the generated path where
# built pex binaries go, as we rely on that for dependency inference in the Dockerfile.
rule_runner.write_files(
{
"src/docker/BUILD": """docker_image(dependencies=["src/python/proj/cli:bin"])""",
"src/docker/Dockerfile": """FROM python""",
"src/python/proj/cli/BUILD": """pex_binary(name="bin", entry_point="main.py")""",
"src/python/proj/cli/main.py": """print("cli main")""",
}
)
assert_build_context(
rule_runner,
Address("src/docker", target_name="docker"),
expected_files=["src/docker/Dockerfile", "src.python.proj.cli/bin.pex"],
)
def test_version_context_from_dockerfile(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/docker/BUILD": """docker_image()""",
"src/docker/Dockerfile": dedent(
"""\
FROM python:3.8
FROM alpine as interim
FROM interim
FROM scratch:1-1 as output
"""
),
}
)
assert_build_context(
rule_runner,
Address("src/docker"),
expected_files=["src/docker/Dockerfile"],
expected_version_context=FrozenDict(
{
"baseimage": DockerVersionContextValue({"tag": "3.8"}),
"stage0": DockerVersionContextValue({"tag": "3.8"}),
"interim": DockerVersionContextValue({"tag": "latest"}),
"stage2": DockerVersionContextValue({"tag": "latest"}),
"output": DockerVersionContextValue({"tag": "1-1"}),
}
),
)
| patricklaw/pants | src/python/pants/backend/docker/util_rules/docker_build_context_test.py | Python | apache-2.0 | 7,173 |
__all__ = [
'ComponentStore',
]
from pathlib import Path
import copy
import requests
from typing import Callable
from . import _components as comp
from .structures import ComponentReference
class ComponentStore:
def __init__(self, local_search_paths=None, url_search_prefixes=None):
self.local_search_paths = local_search_paths or ['.']
self.url_search_prefixes = url_search_prefixes or []
self._component_file_name = 'component.yaml'
self._digests_subpath = 'versions/sha256'
self._tags_subpath = 'versions/tags'
def load_component_from_url(self, url):
return comp.load_component_from_url(url)
def load_component_from_file(self, path):
return comp.load_component_from_file(path)
def load_component(self, name, digest=None, tag=None):
'''
Loads component local file or URL and creates a task factory function
Search locations:
<local-search-path>/<name>/component.yaml
<url-search-prefix>/<name>/component.yaml
If the digest is specified, then the search locations are:
<local-search-path>/<name>/versions/sha256/<digest>
<url-search-prefix>/<name>/versions/sha256/<digest>
If the tag is specified, then the search locations are:
<local-search-path>/<name>/versions/tags/<digest>
<url-search-prefix>/<name>/versions/tags/<digest>
Args:
name: Component name used to search and load the component artifact containing the component definition.
Component name usually has the following form: group/subgroup/component
digest: Strict component version. SHA256 hash digest of the component artifact file. Can be used to load a specific component version so that the pipeline is reproducible.
tag: Version tag. Can be used to load component version from a specific branch. The version of the component referenced by a tag can change in future.
Returns:
A factory function with a strongly-typed signature.
Once called with the required arguments, the factory constructs a pipeline task instance (ContainerOp).
'''
#This function should be called load_task_factory since it returns a factory function.
#The real load_component function should produce an object with component properties (e.g. name, description, inputs/outputs).
#TODO: Change this function to return component spec object but it should be callable to construct tasks.
component_ref = ComponentReference(name=name, digest=digest, tag=tag)
component_ref = self._load_component_spec_in_component_ref(component_ref)
return comp._create_task_factory_from_component_spec(
component_spec=component_ref.spec,
component_ref=component_ref,
)
def _load_component_spec_in_component_ref(
self,
component_ref: ComponentReference,
) -> ComponentReference:
'''Takes component_ref, finds the component spec and returns component_ref with .spec set to the component spec.
See ComponentStore.load_component for the details of the search logic.
'''
if component_ref.spec:
return component_ref
component_ref = copy.copy(component_ref)
if component_ref.url:
component_ref.spec = comp._load_component_spec_from_url(component_ref.url)
return component_ref
name = component_ref.name
if not name:
raise TypeError("name is required")
if name.startswith('/') or name.endswith('/'):
raise ValueError('Component name should not start or end with slash: "{}"'.format(name))
digest = component_ref.digest
tag = component_ref.tag
tried_locations = []
if digest is not None and tag is not None:
raise ValueError('Cannot specify both tag and digest')
if digest is not None:
path_suffix = name + '/' + self._digests_subpath + '/' + digest
elif tag is not None:
path_suffix = name + '/' + self._tags_subpath + '/' + tag
#TODO: Handle symlinks in GIT URLs
else:
path_suffix = name + '/' + self._component_file_name
#Trying local search paths
for local_search_path in self.local_search_paths:
component_path = Path(local_search_path, path_suffix)
tried_locations.append(str(component_path))
if component_path.is_file():
# TODO: Verify that the content matches the digest (if specified).
component_ref._local_path = str(component_path)
component_ref.spec = comp._load_component_spec_from_file(str(component_path))
return component_ref
#Trying URL prefixes
for url_search_prefix in self.url_search_prefixes:
url = url_search_prefix + path_suffix
tried_locations.append(url)
try:
response = requests.get(url) #Does not throw exceptions on bad status, but throws on dead domains and malformed URLs. Should we log those cases?
response.raise_for_status()
except:
continue
if response.content:
# TODO: Verify that the content matches the digest (if specified).
component_ref.url = url
component_ref.spec = comp._load_component_spec_from_yaml_or_zip_bytes(response.content)
return component_ref
raise RuntimeError('Component {} was not found. Tried the following locations:\n{}'.format(name, '\n'.join(tried_locations)))
def _load_component_from_ref(self, component_ref: ComponentReference) -> Callable:
component_ref = self._load_component_spec_in_component_ref(component_ref)
return comp._create_task_factory_from_component_spec(component_spec=component_ref.spec, component_ref=component_ref)
ComponentStore.default_store = ComponentStore(
local_search_paths=[
'.',
],
url_search_prefixes=[
'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/'
],
)
| kubeflow/kfp-tekton-backend | sdk/python/kfp/components/_component_store.py | Python | apache-2.0 | 6,197 |
from django.apps import AppConfig
class BugzConfig(AppConfig):
name = 'Bugz'
| jessica-younker/I-Got-Bugs | api/Bugz/apps.py | Python | apache-2.0 | 83 |
from django import template
import randomcolor
register = template.Library()
CATEGORY_NAMES = {
'cs.AI': 'Artificial Intelligence',
'cs.CL': 'Computation and Language',
'cs.CC': 'Computational Complexity',
'cs.CE': 'Computational Engineering',
'cs.CG': 'Computational Geometry',
'cs.GT': 'Game Theory',
'cs.CV': 'Computer Vision',
'cs.CY': 'Computers and Society',
'cs.CR': 'Cryptography and Security',
'cs.DS': 'Data Structures and Algorithms',
'cs.DB': 'Databases',
'cs.DL': 'Digital Libraries',
'cs.DM': 'Discrete Mathematics',
'cs.DC': 'Distributed Computing',
'cs.ET': 'Emerging Technologies',
'cs.FL': 'Formal Languages',
'cs.GL': 'General Literature',
'cs.GR': 'Graphics',
'cs.AR': 'Hardware Architecture',
'cs.HC': 'Human-Computer Interaction',
'cs.IR': 'Information Retrieval',
'cs.IT': 'Information Theory',
'cs.LG': 'Learning',
'cs.LO': 'Logic',
'cs.MS': 'Mathematical Software',
'cs.MA': 'Multiagent Systems',
'cs.MM': 'Multimedia',
'cs.NI': 'Networking and Internet',
'cs.NE': 'Neural and Evolutionary Computing',
'cs.NA': 'Numerical Analysis',
'cs.OS': 'Operating Systems',
'cs.PF': 'Performance',
'cs.PL': 'Programming Languages',
'cs.RO': 'Robotics',
'cs.SI': 'Social and Information Networks',
'cs.SE': 'Software Engineering',
'cs.SD': 'Sound',
'cs.SC': 'Symbolic Computation',
'cs.SY': 'Systems and Control',
'stat.ML': 'Machine Learning',
}
@register.inclusion_tag('papers/templatetags/category_badge.html')
def category_badge(category):
if category not in CATEGORY_NAMES:
return {}
return {
'category': category,
'name': CATEGORY_NAMES[category],
'color': randomcolor.RandomColor(category).generate(luminosity='dark')[0],
}
| arxiv-vanity/arxiv-vanity | arxiv_vanity/papers/templatetags/papers.py | Python | apache-2.0 | 1,851 |
import os
import csv
def get_value_or_default(value, default=None):
result = value.strip()
if len(result) == 0:
result = default
return result
def read_csv_file(csv_file_name,
delimiter,
quote_char='"',
skip_header=True,
encoding='latin-1'):
print(csv_file_name)
fd = open(file=csv_file_name, mode='r', encoding=encoding)
csv_reader = csv.reader(fd, delimiter=delimiter, quotechar=quote_char)
if skip_header:
next(csv_reader)
for row in csv_reader:
yield row
fd.close() | CALlanoR/virtual_environments | medical_etls/part1/etls/utils.py | Python | apache-2.0 | 607 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from unittest import TestCase
import numpy as np
import pandas as pd
import h5py
from exatomic import Universe
from exatomic.base import resource
from exatomic.molcas.output import Output, Orb, HDF
# TODO : change df.shape[0] == num to len(df.index) == num everywhere
class TestOutput(TestCase):
"""Test the Molcas output file editor."""
def setUp(self):
self.cdz = Output(resource('mol-carbon-dz.out'))
self.uo2sp = Output(resource('mol-uo2-anomb.out'))
self.mamcart = Output(resource('mol-ch3nh2-631g.out'))
self.mamsphr = Output(resource('mol-ch3nh2-anovdzp.out'))
self.c2h6 = Output(resource('mol-c2h6-basis.out'))
def test_add_orb(self):
"""Test adding orbital file functionality."""
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.assertTrue(hasattr(self.mamcart, 'momatrix'))
self.assertTrue(hasattr(self.mamcart, 'orbital'))
with self.assertRaises(ValueError):
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='same')
self.assertTrue('same' in self.mamcart.momatrix.columns)
self.assertTrue('same' in self.mamcart.orbital.columns)
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='diff', orbocc='diffocc')
self.assertTrue('diff' in self.mamcart.momatrix.columns)
self.assertTrue('diffocc' in self.mamcart.orbital.columns)
uni = self.mamcart.to_universe()
self.assertTrue(hasattr(uni, 'momatrix'))
self.assertTrue(hasattr(uni, 'orbital'))
def test_add_overlap(self):
"""Test adding an overlap matrix."""
self.cdz.add_overlap(resource('mol-carbon-dz.overlap'))
self.assertTrue(hasattr(self.cdz, 'overlap'))
uni = self.cdz.to_universe()
self.assertTrue(hasattr(uni, 'overlap'))
def test_parse_atom(self):
"""Test the atom table parser."""
self.uo2sp.parse_atom()
self.assertEqual(self.uo2sp.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2sp.atom))))
self.mamcart.parse_atom()
self.assertEqual(self.mamcart.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamcart.atom))))
self.mamsphr.parse_atom()
self.assertEqual(self.mamsphr.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamsphr.atom))))
def test_parse_basis_set_order(self):
"""Test the basis set order table parser."""
self.uo2sp.parse_basis_set_order()
self.assertEqual(self.uo2sp.basis_set_order.shape[0], 69)
cols = list(set(self.uo2sp.basis_set_order._columns))
test = pd.DataFrame(self.uo2sp.basis_set_order[cols])
self.assertTrue(np.all(pd.notnull(test)))
self.mamcart.parse_basis_set_order()
self.assertEqual(self.mamcart.basis_set_order.shape[0], 28)
cols = list(set(self.mamcart.basis_set_order._columns))
test = pd.DataFrame(self.mamcart.basis_set_order[cols])
self.assertTrue(np.all(pd.notnull(test)))
self.mamsphr.parse_basis_set_order()
self.assertEqual(self.mamsphr.basis_set_order.shape[0], 53)
cols = list(set(self.mamsphr.basis_set_order._columns))
test = pd.DataFrame(self.mamsphr.basis_set_order[cols])
self.assertTrue(np.all(pd.notnull(test)))
def test_parse_basis_set(self):
"""Test the gaussian basis set table parser."""
self.uo2sp.parse_basis_set()
self.assertEqual(self.uo2sp.basis_set.shape[0], 451)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.uo2sp.basis_set))))
self.mamcart.parse_basis_set()
self.assertEqual(self.mamcart.basis_set.shape[0], 84)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamcart.basis_set))))
self.mamsphr.parse_basis_set()
self.assertEqual(self.mamsphr.basis_set.shape[0], 148)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mamsphr.basis_set))))
self.c2h6.parse_basis_set()
self.assertTrue(hasattr(self.c2h6, 'basis_set'))
def test_to_universe(self):
"""Test that the Outputs can be converted to universes."""
uni = self.uo2sp.to_universe()
self.assertIs(type(uni), Universe)
uni = self.mamcart.to_universe()
self.assertIs(type(uni), Universe)
uni = self.mamsphr.to_universe()
self.assertIs(type(uni), Universe)
class TestOrb(TestCase):
"""Test the Molcas Orb file parser."""
def test_parse_old_uhf(self):
sym = Orb(resource('mol-c2h6-old-sym.uhforb'))
nym = Orb(resource('mol-c2h6-old-nosym.uhforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_old_orb(self):
sym = Orb(resource('mol-c2h6-old-sym.scforb'))
nym = Orb(resource('mol-c2h6-old-nosym.scforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_uhf(self):
sym = Orb(resource('mol-c2h6-sym.uhforb'))
nym = Orb(resource('mol-c2h6-nosym.uhforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_orb(self):
sym = Orb(resource('mol-c2h6-sym.scforb'))
nym = Orb(resource('mol-c2h6-nosym.scforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_momatrix(self):
"""Test the momatrix table parser."""
uo2sp = Orb(resource('mol-uo2-anomb.scforb'))
uo2sp.parse_momatrix()
self.assertEqual(uo2sp.momatrix.shape[0], 4761)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(uo2sp.momatrix))))
self.assertTrue(np.all(pd.notnull(pd.DataFrame(uo2sp.orbital))))
mamcart = Orb(resource('mol-ch3nh2-631g.scforb'))
mamcart.parse_momatrix()
self.assertEqual(mamcart.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamcart.momatrix))))
self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamcart.orbital))))
mamsphr = Orb(resource('mol-ch3nh2-anovdzp.scforb'))
mamsphr.parse_momatrix()
self.assertEqual(mamsphr.momatrix.shape[0], 2809)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamsphr.momatrix))))
self.assertTrue(np.all(pd.notnull(pd.DataFrame(mamsphr.orbital))))
class TestHDF(TestCase):
def setUp(self):
self.nym = HDF(resource('mol-c2h6-nosym-scf.hdf5'))
self.sym = HDF(resource('mol-c2h6-sym-scf.hdf5'))
def test_parse_atom(self):
self.sym.parse_atom()
self.nym.parse_atom()
self.assertTrue(self.sym.atom.shape[0] == 8)
self.assertTrue(self.nym.atom.shape[0] == 8)
def test_parse_basis_set_order(self):
self.sym.parse_basis_set_order()
self.nym.parse_basis_set_order()
self.assertTrue(self.sym.basis_set_order.shape[0] == 30)
self.assertTrue(self.nym.basis_set_order.shape[0] == 30)
def test_parse_orbital(self):
self.sym.parse_orbital()
self.nym.parse_orbital()
self.assertTrue(self.sym.orbital.shape[0] == 30)
self.assertTrue(self.nym.orbital.shape[0] == 30)
def test_parse_overlap(self):
self.sym.parse_overlap()
self.nym.parse_overlap()
self.assertTrue(self.sym.overlap.shape[0])
self.assertTrue(self.nym.overlap.shape[0])
def test_parse_momatrix(self):
self.sym.parse_momatrix()
self.nym.parse_momatrix()
self.assertTrue(self.nym.momatrix.shape[0] == 900)
with self.assertRaises(AttributeError):
self.assertTrue(self.sym.momatrix)
def test_to_universe(self):
self.sym.to_universe()
self.nym.to_universe()
| exa-analytics/exatomic | exatomic/molcas/tests/test_output.py | Python | apache-2.0 | 8,381 |
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
An L2 learning switch.
It is derived from one written live for an SDN crash course.
It is somwhat similar to NOX's pyswitch in that it installs
exact-match rules for each flow.
"""
from __future__ import division
from random import randrange
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
import sys, os, commands, time
from pox.lib.util import dpidToStr
log = core.getLogger()
#-------------------------------define flow rate----------
flow_rate = 50
interval = 1/flow_rate
print 'current flow modification rate is:', flow_rate
global burst
burst = {}
# We don't want to flood immediately when a switch connects.
# Can be overriden on commandline.
_flood_delay = 0
class LearningSwitch (object):
"""
The learning switch "brain" associated with a single OpenFlow switch.
When we see a packet, we'd like to output it on a port which will
eventually lead to the destination. To accomplish this, we build a
table that maps addresses to ports.
We populate the table by observing traffic. When we see a packet
from some source coming from some port, we know that source is out
that port.
When we want to forward traffic, we look up the desintation in our
table. If we don't know the port, we simply send the message out
all ports except the one it came in on. (In the presence of loops,
this is bad!).
In short, our algorithm looks like this:
For each packet from the switch:
1) Use source address and switch port to update address/port table
2) Is transparent = False and either Ethertype is LLDP or the packet's
destination address is a Bridge Filtered address?
Yes:
2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x)
DONE
3) Is destination multicast?
Yes:
3a) Flood the packet
DONE
4) Port for destination address in our address/port table?
No:
4a) Flood the packet
DONE
5) Is output port the same as input port?
Yes:
5a) Drop packet and similar ones for a while
6) Install flow table entry in the switch so that this
flow goes out the appopriate port
6a) Send the packet out appropriate port
"""
def __init__ (self, connection, transparent):
# Switch we'll be adding L2 learning switch capabilities to
self.connection = connection
self.transparent = transparent
# Our table
self.macToPort = {}
# We want to hear PacketIn messages, so we listen
# to the connection
connection.addListeners(self)
# We just use this to know when to log a helpful message
self.hold_down_expired = _flood_delay == 0
#-----------------------
msg = of.ofp_flow_mod(command=of.OFPFC_DELETE)
# iterate over all connected switches and delete all their flows
connection.send(msg)
print "INFO: Clearing all flows..."
#for BCM switch only
msg = of.ofp_flow_mod()
msg.priority = 10
msg.match.dl_type = 0x800
#msg.match.in_port = 5
msg.match.nw_src = '10.0.0.1'
msg.idle_timeout = 0
msg.hard_timeout = 0
#msg.actions.append(of.ofp_action_output(port = 1))
self.connection.send(msg)
print 'INFO: add a default rule... I am slice 1(BCM only)'
for k in xrange(1,65):#the number of rules to install
#insert first
if k % 2 == 0:
msg = of.ofp_flow_mod()
#msg.match = of.ofp_match.from_packet(packet, event.port)
#msg.priority = 20000 + randrange(1000)
msg.priority = 2000
msg.match.dl_type = 0x800
i = int(k / 256) + 56
j = k % 256
dst = '192.168.' + str(i) + '.' + str(j)
#msg.match.in_port = 1
msg.match.nw_src = '10.0.0.1'
msg.match.nw_dst = dst
#print 'INFO',dst, time.time()
msg.idle_timeout = 0
msg.hard_timeout = 0
msg.actions.append(of.ofp_action_output(port = 2))
#msg.data = event.ofp # 6a
self.connection.send(msg)
time.sleep(0.02)
#-------------------------
# (note that flow_mods match all flows by default)
os.system('./simplesniffer eth2 64&')
os.system('sudo bash ../pktgen/pktgen.conf.1-1-flow-dist.sh &')
time.sleep(5)
y = 0
print 'INFO: starting sending flow mod...'
for k in xrange(1,65):#the number of rules to install
#insert firsti
msg = of.ofp_flow_mod()
if k % 2 == 0:
msg.command = of.OFPFC_MODIFY
#msg.match = of.ofp_match.from_packet(packet, event.port)
#msg.priority = 20000 + randrange(1000)
msg.priority = 2000
msg.match.dl_type = 0x800
i = int(k / 256) + 56
j = k % 256
dst = '192.168.' + str(i) + '.' + str(j)
#msg.match.in_port = 1
msg.match.nw_src = '10.0.0.1'
msg.match.nw_dst = dst
#print 'INFO',dst, time.time()
msg.idle_timeout = 0
msg.hard_timeout = 0
msg.actions.append(of.ofp_action_output(port = 5))
#msg.data = event.ofp # 6a
self.connection.send(msg)
#print 'DATA: 10.0.0.1', dst, '%f' %time.time()
#print 'DATA: 10.0.0.1', dst, '%f' %time.time()
burst[dst] = time.time()
#time.sleep(interval)
print 'INFO: flow mod measure finished...'
#write file
w = open('poxout1','w')
for d in burst:
w.write('src: 10.0.0.1 dst: %s sec: %f usec: %f\n' %(d, int(burst[d]), (burst[d] - int(burst[d])) * 1000000 ))
w.close()
os.system('sudo bash cleanpox.sh') #self destrory
def _handle_PacketIn (self, event):
"""
Handle packet in messages from the switch to implement above algorithm.
"""
packet = event.parsed
#print 'PACKET_IN:', event.port, packet.next.dstip,'%f' % time.time()
def _handle_flowstats_received (event):
stats = flow_stats_to_list(event.stats)
print "FlowStatsReceived from %s: %s" % (dpidToStr(event.connection.dpid), stats)
class l2_learning (object):
"""
Waits for OpenFlow switches to connect and makes them learning switches.
"""
def __init__ (self, transparent):
core.openflow.addListeners(self)
self.transparent = transparent
def _handle_ConnectionUp (self, event):
log.debug("Connection %s" % (event.connection,))
LearningSwitch(event.connection, self.transparent)
def launch (transparent=False, hold_down=_flood_delay):
"""
Starts an L2 learning switch.
"""
try:
global _flood_delay
_flood_delay = int(str(hold_down), 10)
assert _flood_delay >= 0
except:
raise RuntimeError("Expected hold-down to be a number")
core.registerNew(l2_learning, str_to_bool(transparent))
| PrincetonUniversity/AdvNet-OF_Scripts | evaluation/switch/flowmod_test/pox/pox/samples/l2_bell_burst_mod.py | Python | apache-2.0 | 7,313 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow.contrib.hooks.gcp_video_intelligence_hook import CloudVideoIntelligenceHook
from google.cloud.videointelligence_v1 import enums
from tests.contrib.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
from tests.compat import mock
INPUT_URI = "gs://bucket-name/input-file"
OUTPUT_URI = "gs://bucket-name/output-file"
FEATURES = [enums.Feature.LABEL_DETECTION]
ANNOTATE_VIDEO_RESPONSE = {'test': 'test'}
class CloudVideoIntelligenceHookTestCase(unittest.TestCase):
def setUp(self):
with mock.patch(
"airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudVideoIntelligenceHook(gcp_conn_id="test")
@mock.patch("airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook.get_conn")
def test_annotate_video(self, get_conn):
# Given
annotate_video_method = get_conn.return_value.annotate_video
get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE
# When
result = self.hook.annotate_video(input_uri=INPUT_URI, features=FEATURES)
# Then
self.assertIs(result, ANNOTATE_VIDEO_RESPONSE)
annotate_video_method.assert_called_once_with(
input_uri=INPUT_URI,
input_content=None,
features=FEATURES,
video_context=None,
output_uri=None,
location_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook.get_conn")
def test_annotate_video_with_output_uri(self, get_conn):
# Given
annotate_video_method = get_conn.return_value.annotate_video
get_conn.return_value.annotate_video.return_value = ANNOTATE_VIDEO_RESPONSE
# When
result = self.hook.annotate_video(input_uri=INPUT_URI, output_uri=OUTPUT_URI, features=FEATURES)
# Then
self.assertIs(result, ANNOTATE_VIDEO_RESPONSE)
annotate_video_method.assert_called_once_with(
input_uri=INPUT_URI,
output_uri=OUTPUT_URI,
input_content=None,
features=FEATURES,
video_context=None,
location_id=None,
retry=None,
timeout=None,
metadata=None,
)
| owlabs/incubator-airflow | tests/contrib/hooks/test_gcp_video_intelligence_hook.py | Python | apache-2.0 | 3,290 |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
#Added
from models import Session
from models import SessionForm
from models import SessionForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
SESSION_DEFAULTS = {
"description": '',
"highlights": ["Default"],
"duration": 0.0,
"users": []
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
## create Resource container for post request with Sessions
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1)
)
## and for a GET Session request
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1)
)
SESSION_GETBYNAME = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1)
)
SESSION_GETBYTYPE = endpoints.ResourceContainer(
message_types.VoidMessage,
sessionType=messages.StringField(1),
websafeConferenceKey=messages.StringField(2)
)
USERWISHLIST = endpoints.ResourceContainer(
message_types.VoidMessage,
sessionKey = messages.StringField(1)
)
GET_FEATURED_SPEAKER = endpoints.ResourceContainer(
speaker = messages.StringField(1)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# Task 1.)
# Sessions
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copySessionToForm(self, session):
"""Copy relevant fields from Conference to ConferenceForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
if field.name == 'date':
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
sf.check_initialized()
return sf
def _createSessionObject(self, request):
"""Create or update Session object, returning SessionForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# get the conf that the session should be added to
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.UnauthorizedException("There must be a valid conference to add the sessions to")
if not request.speaker:
raise endpoints.BadRequestException("Session 'speaker' field required")
if not request.speaker:
raise endpoints.BadRequestException("Session 'type' field required")
# copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data["websafeConferenceKey"]
## Check to see if valid start time. Must be between 1-12 am and 1-12 pm
## The format should be 00:xx ex: 09:am
if data['startTime']:
hour = int(data['startTime'][0:2])
ampm = data['startTime'][3:]
print ampm
if not (hour <= 12 and hour >= 1):
raise endpoints.BadRequestException("Start time must be between 1 and 12")
if not (ampm == 'am' or ampm == 'AM' or ampm == 'pm' or ampm == 'PM'):
raise endpoints.BadRequestException("Start time must be either am or pm")
else:
raise endpoints.BadRequestException("We need to know the start time of the session")
# add default values for those missing (both data model & outbound Message)
# convert dates from strings to Date objects; set month based on start_date
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
else:
raise endpoints.BadRequestException("Session start date required")
for df in SESSION_DEFAULTS:
if data[df] in (None, []):
data[df] = SESSION_DEFAULTS[df]
setattr(request, df, SESSION_DEFAULTS[df])
# if there is a refrence to the Conference that the session is for then
# make the session a child of that Conference.
# creating the session key
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
s_key = ndb.Key(Session, s_id, parent=conf.key)
data["key"] = s_key
Session(**data).put()
## Additions for Task 4
## first get current featured speaker
curr_speaker = data["speaker"]
taskqueue.add(params={'speaker':curr_speaker, 'websafeConferenceKey': conf.key.urlsafe()},
url='/tasks/setFeaturedSpeaker')
return self._copySessionToForm(request)
# Task 4 Endpoint for getting the current featured speaker
@endpoints.method(message_types.VoidMessage,StringMessage,path='featuredspeaker',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self,request):
"""Return the featured speaker for the session """
featured_speaker = memcache.get("featured_speaker")
# if there is not speaker then tell the 'user' there is no speaker
if featured_speaker == None:
featured_speaker = "There is no current featured speaker"
# using the string message class from models.py
string_message = StringMessage()
setattr(string_message,"data",featured_speaker)
return string_message
# Task 1 Enpoint for creating a session
@endpoints.method(SESSION_POST_REQUEST,SessionForm,path='session/{websafeConferenceKey}',
http_method='POST', name='createSession')
def createSession(self,request):
"""Create new session """
return self._createSessionObject(request)
# Task 1 Endpoint for fetching a list of all current sessions of a conference
@endpoints.method(SESSION_GET_REQUEST,SessionForms,path='session/{websafeConferenceKey}',
http_method='GET', name='getSessions')
def getSessions(self,request):
"""Create new session """
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
sessions = Session.query(ancestor=conf.key)
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# Task 1 Endpoint for getting all sessions of a speaker
@endpoints.method(SESSION_GETBYNAME, SessionForms,
path='session/{speaker}',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Return requested session (by username)."""
# get Conference object from request; bail if not found
if not request.speaker:
raise endpoints.BadRequestException("You must pass the name of the speaker")
# the speaker can have more than one session
sessions = Session.query(Session.speaker == request.speaker)
# return SessionForm
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# Task 1 Enpoint for getting all sessions of a given type
@endpoints.method(SESSION_GETBYTYPE, SessionForms,
path='session',
http_method='GET', name='getSessionByType')
def getSessionByType(self, request):
"""Return requested session (by type)."""
# get Conference object from request; bail if not found
if not request.sessionType:
raise endpoints.BadRequestException("You must pass the type of the session")
if not request.websafeConferenceKey:
raise endpoints.BadRequestException("You must pass a conference key")
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
sessions = Session.query(Session.sessionType == request.sessionType,
ancestor=conf.key)
# return SessionForm
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# Task 2.)
## --- User wish list MEthods
# add a wishlist to a given session for the current user
@endpoints.method(USERWISHLIST,SessionForm,path='wishlist/{sessionKey}',
http_method='POST', name='addToWishList')
def addToWishList(self,request):
if not request.sessionKey:
raise BadRequestException("You must pass a session key")
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# append the current user to the wishlist property
session_key = ndb.Key(urlsafe=request.sessionKey)
session = session_key.get()
## Only add the user if he does not currently have the session in his wishlist
if user_id in session.users:
raise BadRequestException("You are already in this session")
else:
session.users.append(user_id)
session.put()
return self._copySessionToForm(session)
# Task 2 endpoint delete current user from given wish list
@endpoints.method(USERWISHLIST,SessionForm,path='deleteWishlist/{sessionKey}',
http_method='POST', name='deleteFromWishList')
def deleteFromWishList(self,request):
if not request.sessionKey:
raise BadRequestException("You must pass a session key")
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
session_key = ndb.Key(urlsafe=request.sessionKey)
session = session_key.get()
# refrencing the session users property call the python remove function
# to remove user from the current users list
# only remove from the users list if the user is in it otherwise return a error
if user_id in session.users:
session.users.remove(user_id)
else:
raise BadRequestException("You do not have this session in your wishlist")
session.put()
return self._copySessionToForm(session)
# Task 2 endpoint that returns the full wishlist of the current user
@endpoints.method(message_types.VoidMessage,SessionForms,path='wishlist',
http_method='GET', name='getCurrentWishList')
def getCurrentWishList(self,request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
sessions = Session.query(Session.users.IN([user_id]))
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# FOR Task 3
# query wishlist for a given conference
@endpoints.method(SESSION_GET_REQUEST,SessionForms,path='wishlistByConference/{websafeConferenceKey}',
http_method='GET',name='getWishListByConference')
def getWishListByConference(self,request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
sessions = Session.query(ndb.AND(Session.users.IN([user_id]),ancestor=conf.key))
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# query Sessions that start at a specific time
@endpoints.method(SESSION_GET_REQUEST,SessionForms,path="sessionsByStartTime",
http_method='GET',name='getSessionsByTime')
def getSessionsByTime(self,request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.startTime:
raise BadRequestException("You must pass a startime in the format 12:am")
# Since we are not quering on a specif
sessions = Session.query(startime=request.startTime).get()
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
## ------- Conference MEthods
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifyable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='filterPlayground',
http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
"""Filter Playground"""
q = Conference.query()
# field = "city"
# operator = "="
# value = "London"
# f = ndb.query.FilterNode(field, operator, value)
# q = q.filter(f)
q = q.filter(Conference.city=="London")
q = q.filter(Conference.topics=="Medical Innovations")
q = q.filter(Conference.month==6)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q]
)
api = endpoints.api_server([ConferenceApi]) # register API
| befeltingu/UdacityFinalProject4 | conference.py | Python | apache-2.0 | 33,399 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ServerAddressByClientCIDR(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'client_cidr': 'str',
'server_address': 'str'
}
attribute_map = {
'client_cidr': 'clientCIDR',
'server_address': 'serverAddress'
}
def __init__(self, client_cidr=None, server_address=None, local_vars_configuration=None): # noqa: E501
"""V1ServerAddressByClientCIDR - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._client_cidr = None
self._server_address = None
self.discriminator = None
self.client_cidr = client_cidr
self.server_address = server_address
@property
def client_cidr(self):
"""Gets the client_cidr of this V1ServerAddressByClientCIDR. # noqa: E501
The CIDR with which clients can match their IP to figure out the server address that they should use. # noqa: E501
:return: The client_cidr of this V1ServerAddressByClientCIDR. # noqa: E501
:rtype: str
"""
return self._client_cidr
@client_cidr.setter
def client_cidr(self, client_cidr):
"""Sets the client_cidr of this V1ServerAddressByClientCIDR.
The CIDR with which clients can match their IP to figure out the server address that they should use. # noqa: E501
:param client_cidr: The client_cidr of this V1ServerAddressByClientCIDR. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and client_cidr is None: # noqa: E501
raise ValueError("Invalid value for `client_cidr`, must not be `None`") # noqa: E501
self._client_cidr = client_cidr
@property
def server_address(self):
"""Gets the server_address of this V1ServerAddressByClientCIDR. # noqa: E501
Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port. # noqa: E501
:return: The server_address of this V1ServerAddressByClientCIDR. # noqa: E501
:rtype: str
"""
return self._server_address
@server_address.setter
def server_address(self, server_address):
"""Sets the server_address of this V1ServerAddressByClientCIDR.
Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port. # noqa: E501
:param server_address: The server_address of this V1ServerAddressByClientCIDR. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and server_address is None: # noqa: E501
raise ValueError("Invalid value for `server_address`, must not be `None`") # noqa: E501
self._server_address = server_address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServerAddressByClientCIDR):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServerAddressByClientCIDR):
return True
return self.to_dict() != other.to_dict()
| kubernetes-client/python | kubernetes/client/models/v1_server_address_by_client_cidr.py | Python | apache-2.0 | 5,238 |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides process view
This module provides a view for
visualizing processes in human-readable formm
"""
import cinder.openstack.common.report.views.jinja_view as jv
class ProcessView(jv.JinjaView):
"""A Process View
This view displays process models defined by
:class:`openstack.common.report.models.process.ProcessModel`
"""
VIEW_TEXT = (
"Process {{ pid }} (under {{ parent_pid }}) "
"[ run by: {{ username }} ({{ uids.real|default('unknown uid') }}),"
" state: {{ state }} ]\n"
"{% for child in children %}"
" {{ child }}"
"{% endfor %}"
)
| saeki-masaki/cinder | cinder/openstack/common/report/views/text/process.py | Python | apache-2.0 | 1,233 |
#
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
from util import OpenCenterTestCase
import opencenter.db.api as db_api
from opencenter.webapp import ast
api = db_api.api_from_models()
class ExpressionTestCase(OpenCenterTestCase):
def setUp(self):
self.nodes = {}
self.interfaces = {}
self.nodes['node-1'] = self._model_create('nodes', name='node-1')
self.interfaces['chef'] = self._model_create('filters', name='chef',
filter_type='interface',
expr='facts.x = true')
self.nodes['container'] = self._model_create('nodes', name='container')
def tearDown(self):
self._clean_all()
def _run_expression(self, node, expression, ns={}):
builder = ast.FilterBuilder(ast.FilterTokenizer(), expression,
api=api)
root_node = builder.build()
return root_node.eval_node(node, symbol_table=ns)
def _simple_expression(self, expression):
node = self._model_get_by_id('nodes', self.nodes['node-1']['id'])
return self._run_expression(node,
'nodes: %s' % expression)
def _invert_expression(self, expression, ns={}):
builder = ast.FilterBuilder(ast.FilterTokenizer(), expression)
root_node = builder.build()
return root_node.invert()
def _eval_expression(self, expression, node_id, ns={}):
ephemeral_api = db_api.ephemeral_api_from_api(api)
builder = ast.FilterBuilder(ast.FilterTokenizer(), expression,
api=ephemeral_api)
node = ephemeral_api._model_get_by_id('nodes', node_id)
builder.eval_node(node, symbol_table=ns)
new_node = ephemeral_api._model_get_by_id('nodes', node_id)
return new_node
def test_bad_interface(self):
expression = "ifcount('blahblah') > 0"
self.assertRaises(SyntaxError, self._run_expression,
self.nodes['node-1'], expression)
def test_zero_ifcount(self):
expression = "ifcount('chef') > 0"
result = self._run_expression(self.nodes['node-1'], expression)
self.logger.debug('Got result: %s' % result)
self.assertEquals(result, False)
def test_valid_ifcount(self):
expression = "ifcount('chef') > 0"
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='x', value=True)
result = self._run_expression(self.nodes['node-1'], expression)
self.logger.debug('Got result: %s' % result)
self.assertEquals(result, True)
def test_invert_equals(self):
expression = "facts.test = 'test'"
result = self._invert_expression(expression)
self.assertEquals(result, ["facts.test := 'test'"])
def test_invert_and(self):
expression = "facts.test='test' and facts.x='x'"
result = self._invert_expression(expression)
self.assertTrue("facts.test := 'test'" in result)
self.assertTrue("facts.x := 'x'" in result)
def test_invert_in(self):
expression = "'test' in facts.foo"
result = self._invert_expression(expression)
self.assertTrue("facts.foo := union(facts.foo, 'test')" in result)
self.assertEquals(len(result), 1)
def test_invert_not_in(self):
expression = "'test' !in facts.foo"
result = self._invert_expression(expression)
self.assertTrue("facts.foo := remove(facts.foo, 'test')" in result)
self.assertEquals(len(result), 1)
def test_eval_assign(self):
node_id = self.nodes['node-1']['id']
expression = "facts.parent_id := %d" % int(
self.nodes['container']['id'])
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts'].get('parent_id', None),
self.nodes['container']['id'])
def test_eval_union(self):
node_id = self.nodes['node-1']['id']
expression = "facts.woof := union(facts.woof, 3)"
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts']['woof'], [3])
def test_eval_remove(self):
node_id = self.nodes['node-1']['id']
fact = self._model_create('facts', node_id=node_id,
key='array_fact', value=[1, 2])
expression = 'facts.array_fact := remove(facts.array_fact, 2)'
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts']['array_fact'], [1])
# verify removing from none returns none. This is perhaps
# questionable, but is inline with the rest of the none/empty
# behavior. It could probably also return [], but enforce
# current behavior
self._model_delete('facts', fact['id'])
expression = 'facts.array_fact := remove(facts.array_fact, "test")'
node = self._eval_expression(expression, node_id)
self.assertEquals(node['facts']['array_fact'], None)
# verify removing from a non-list raises SyntaxError
self._model_create('facts', node_id=node_id,
key='array_fact', value='non-array')
expression = 'facts.array_fact := remove(facts.array_fact, "whoops")'
self.assertRaises(SyntaxError, self._eval_expression,
expression, node_id)
def test_eval_namespaces(self):
node_id = self.nodes['node-1']['id']
expression = "facts.parent_id := value"
ns = {"value": self.nodes['container']['id']}
node = self._eval_expression(expression, node_id, ns)
self.assertEquals(node['facts'].get('parent_id', None),
self.nodes['container']['id'])
# test the inverter and regularizer functions
def test_regularize_expression(self):
expression = 'foo=value'
regular = ast.regularize_expression(expression)
self.logger.debug('Got regularized expression "%s" for "%s"' %
(regular, expression))
self.assertEquals(regular, 'foo = value')
def test_inverted_expression(self):
expression = 'foo=value'
inverted = ast.invert_expression(expression)
self.logger.debug('Got inverted expression "%s" for "%s"' %
(inverted, expression))
self.assertEquals(len(inverted), 1)
self.assertEquals(inverted[0], 'foo := value')
def test_inverted_union(self):
expression = 'facts.test := union(facts.test, test)'
inverted = ast.invert_expression(expression)
self.logger.debug('Got inverted expression "%s" for "%s"' %
(inverted, expression))
self.assertEquals(len(inverted), 1)
self.assertEquals(inverted[0], 'test in facts.test')
def test_inverted_remove(self):
expression = 'facts.test := remove(facts.test, test)'
inverted = ast.invert_expression(expression)
self.logger.debug('Got inverted expression "%s" for "%s"' %
(inverted, expression))
self.assertEquals(len(inverted), 1)
self.assertEquals(inverted[0], 'test !in facts.test')
def test_concrete_expression(self):
expression = "foo = value"
ns = {"value": 3}
concrete = ast.concrete_expression(expression, ns)
self.logger.debug('Got concrete expression "%s" for "%s"' %
(concrete, expression))
# TODO(rpedde): This does not work like you think it does
# self.assertTrue('foo = 3', concrete)
# Using an assertEquals of the above fails
# self.assertEquals(concrete, 'foo = 3')
# But this works
self.assertEquals(concrete, 'foo = value')
def test_apply_expression(self):
expression = 'facts.test := union(facts.test, "test")'
node = self._model_get_by_id('nodes', self.nodes['node-1']['id'])
# make sure we are applying into an empty fact
self.assertFalse('test' in node['facts'])
ast.apply_expression(self.nodes['node-1']['id'], expression, api)
node = self._model_get_by_id('nodes', self.nodes['node-1']['id'])
self.assertTrue('test' in node['facts'])
self.assertEquals(node['facts']['test'], ['test'])
# FIXME: when we get types
def test_util_nth_with_none(self):
expression = 'nth(0, facts.test)' # nth of none?
res = self._simple_expression(expression)
self.assertIsNone(res)
# FIXME: when we get types
def test_util_nth_not_integer(self):
expression = 'nth("a", facts.test)' # raise with type error?
res = self._simple_expression(expression)
self.assertIsNone(res)
# FIXME: when we get types
def test_util_nth_index_out_of_range(self):
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='test', value=[1, 2, 3])
self.assertTrue(self._simple_expression('nth(2, facts.test)') is 3)
self.assertIsNone(self._simple_expression('nth(3, facts.test)'))
# FIXME: when we get types
def test_str_casting_none(self):
# this should fail, too, I think
self.assertIsNone(self._simple_expression('str(facts.test)'))
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='test', value=[1, 2, 3])
self.assertEquals(self._simple_expression('str(facts.test)'),
'[1, 2, 3]')
self._model_create('facts', node_id=self.nodes['node-1']['id'],
key='test', value=1)
self.assertEquals(self._simple_expression('str(facts.test)'), '1')
| rcbops/opencenter | tests/test_expressions.py | Python | apache-2.0 | 11,034 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2017 Georgi Georgiev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
#ge0rgi:added is_volume_trusted
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.scheduler.filters.asset_tag_filter import TrustAssertionFilter
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
VA_LIST = objects.VolumeAttachmentList
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.ibm.xiv_ds8k':
'cinder.volume.drivers.ibm.ibm_storage',
'cinder.volume.drivers.emc.scaleio':
'cinder.volume.drivers.dell_emc.scaleio.driver',
'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver':
'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
'cinder.volume.drivers.datera.DateraDriver':
'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver':
'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver',
'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver':
'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver',
'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver':
'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver',
}
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.key_manager = key_manager.API(CONF)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
cluster_name=self.cluster,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
msg = _LE('Active-Active configuration is not currently supported '
'by driver %s.') % volume_driver
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
if CONF.trusted_computing:
self.asset_tag_filter = TrustAssertionFilter()
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure returned item is in this hosts volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
# NOTE(jdg): snapshots are slighty harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info(_LI('Including all resources from host %(host)s in cluster '
'%(cluster)s.'),
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'and %(num_cache)s image volume caches from host '
'%(host)s have been included in cluster %(cluster)s.'),
{'num_vols': num_vols, 'num_cgs': num_cgs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error(_LE("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf"),
resource={'type': 'driver',
'id': self.__class__.__name__})
return
# If we have just added this host to a cluster we have to include all
# our resources in that cluster.
if added_to_cluster:
self._include_resources_in_cluster(ctxt)
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = self._get_my_volumes(ctxt)
snapshots = self._get_my_snapshots(ctxt)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
try:
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.conditional_update({'status': 'error'},
{'status': 'in-use'})
# All other cleanups are processed by parent class CleanableManager
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
# Keep the image tmp file clean when init host.
backend_name = vol_utils.extract_host(self.service_topic_queue)
image_utils.cleanup_temporary_file(backend_name)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
# Make sure to call CleanableManager to do the cleanup
super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
**kwargs)
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
# Make sure the driver is initialized first
utils.log_unsupported_driver_warning(self.driver)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
LOG.error(_LE("Cannot complete RPC initialization because "
"driver isn't initialized properly."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
return
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def _do_cleanup(self, ctxt, vo_resource):
if isinstance(vo_resource, objects.Volume):
if vo_resource.status == 'downloading':
self.driver.clear_download(ctxt, vo_resource)
elif vo_resource.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, vo_resource.id)
elif vo_resource.status == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
vo_resource, cascade=True)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, vo_resource, cascade=True)
# We signal that we take care of cleaning the worker ourselves
# (with set_workers decorator in delete_volume method) so
# do_cleanup method doesn't need to remove it.
return True
# For Volume creating and downloading and for Snapshot downloading
# statuses we have to set status to error
if vo_resource.status in ('creating', 'downloading'):
vo_resource.status = 'error'
vo_resource.save()
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not vol_utils.hosts_are_equivalent(resource.host, self.host)):
pool = vol_utils.extract_host(resource.host, 'pool')
resource.host = vol_utils.append_host(self.host, pool)
resource.save()
@objects.Volume.set_workers
def create_volume(self, context, volume, request_spec=None,
filter_properties=None, allow_reschedule=True):
"""Creates the volume."""
# Log about unsupported drivers
utils.log_unsupported_driver_warning(self.driver)
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(volume)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = objects.RequestSpec()
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
# This code executes create volume flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
with coordination.Lock(locked_action):
_run_flow()
finally:
try:
flow_engine.storage.fetch('refreshed')
except tfe.NotFound:
# If there's no vol_ref, then flow is reverted. Lets check out
# if rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(volume)
LOG.info(_LI("Created volume successfully."), resource=volume)
return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = vol_utils.extract_host(resource.service_topic_queue)
backend = vol_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
notification = "delete.start"
if unmanage_only:
notification = "unmanage.start"
self._notify_about_volume_usage(context, volume, notification)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
new_status = 'error_deleting'
if unmanage_only is True:
new_status = 'error_unmanaging'
self._clear_db(context, is_migrating_dest, volume,
new_status)
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
notification = "delete.end"
if unmanage_only:
notification = "unmanage.end"
self._notify_about_volume_usage(context, volume, notification)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
msg = _LI("Deleted volume successfully.")
if unmanage_only:
msg = _LI("Unmanaged volume successfully.")
LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, snapshot.volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, snapshot.volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': snapshot.volume_id},
resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
msg = _LI("Delete snapshot completed successfully.")
if unmanage_only:
msg = _LI("Unmanage snapshot completed successfully.")
LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode, volume=None):
"""Updates db to show volume is attached."""
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look
# up the volume by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
# Get admin_metadata. This needs admin context.
with volume.obj_as_admin():
volume_metadata = volume.admin_metadata
# check the volume status before attaching
if volume.status == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume.status == 'in-use' and not volume.multiattach
and not volume.migration_status):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = (
VA_LIST.get_all_by_instance_uuid(
context, instance_uuid))
else:
attachments = (
VA_LIST.get_all_by_host(
context, host_name_sanitized))
if attachments:
# check if volume<->instance mapping is already tracked in DB
for attachment in attachments:
if attachment['volume_id'] == volume_id:
volume.status = 'in-use'
volume.save()
return attachment
self._notify_about_volume_usage(context, volume,
"attach.start")
attachment = volume.begin_attach(mode)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
raise exception.InvalidUUID(uuid=instance_uuid)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
self.message_api.create(
context, defined_messages.EventIds.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume.id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info(_LI('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.'),
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
volume = attachment.finish_attach(
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
volume=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
if attachment_id:
try:
attachment = objects.VolumeAttachment.get_by_id(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
volume.finish_detach(attachment_id)
return
else:
# We can try and degrade gracefully here by trying to detach
# a volume without the attachment_id here if the volume only has
# one attachment. This is for backwards compatibility.
attachments = volume.volume_attachment
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
# so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
volume.status = 'available'
volume.attach_status = fields.VolumeAttachStatus.DETACHED
volume.save()
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info(_LI('Detaching volume %(volume_id)s from instance '
'%(instance)s.'),
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'), {
'attach_status':
fields.VolumeAttachStatus.ERROR_DETACHING})
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
volume.finish_detach(attachment.id)
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on %(service)s.'),
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
LOG.exception(_LE('Create clone_image_volume: %(volume_id)s'
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume = objects.Volume.get_by_id(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
# The image_owner metadata should be set before uri is added to
# the image so glance cinder store can check its owner.
image_volume_meta = {'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta['glance_image_id'] = image_meta['id']
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context,
defined_messages.EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
def initialize_connection(self, context, volume, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
# TODO(jdg): Add deprecation warning
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException as ex:
msg = _("Create export of volume failed (%s)") % ex.msg
LOG.exception(msg, resource=volume)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(context, volume, conn_info)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
attach_encryptor=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume, properties)
attach_info = self._connect_device(conn)
try:
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_attach_volume_encryptor(ctxt,
attach_info,
encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume encryptor"
" %(vol)s."), {'vol': volume['id']})
self._detach_volume(ctxt, attach_info, volume, properties)
return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
attach_encryptor = False
# If the encryption method or key is changed, we have to
# copy data through dm-crypt.
if volume_types.volume_types_encryption_changed(
ctxt,
src_vol.volume_type_id,
dest_vol.volume_type_id):
attach_encryptor = True
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(
ctxt, dest_vol, properties,
remote=dest_remote,
attach_encryptor=attach_encryptor)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(
ctxt, src_vol, properties,
remote=src_remote,
attach_encryptor=attach_encryptor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote,
attach_encryptor=attach_encryptor)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt,
dest_vol.service_topic_queue,
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote,
attach_encryptor=attach_encryptor)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote,
attach_encryptor=attach_encryptor)
def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
tmp_skip = {'snapshot_id', 'source_volid'}
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host',
'cluster_name'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = vol_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
new_volume = objects.Volume(
context=ctxt,
host=backend['host'],
cluster_name=backend.get('cluster_name'),
status='creating',
attach_status=fields.VolumeAttachStatus.DETACHED,
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, None, None,
allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Set skipped value to avoid calling
# function except for _create_raw_volume
tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
if tmp_skipped_values:
new_volume.update(tmp_skipped_values)
new_volume.save()
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume, new_volume,
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
# As after detach and refresh, volume_attchments will be None.
# We keep volume_attachment for later attach.
volume_attachments = []
if orig_volume_status == 'in-use':
for attachment in volume.volume_attachment:
# Save the attachments the volume currently have
volume_attachments.append(attachment)
try:
self.detach_volume(ctxt, volume.id, attachment.id)
except Exception as ex:
LOG.error(_LE("Detach migration source volume "
"%(volume.id)s from instance "
"%(instance_id)s failed: %(err)s"),
{'err': ex,
'volume.id': volume.id,
'instance_id': attachment.id},
resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
# Restore the attachmens
if orig_volume_status == 'in-use':
for attachment in volume_attachments:
LOG.debug('Re-attaching: %s', attachment)
rpcapi.attach_volume(ctxt, volume,
attachment.instance_uuid,
attachment.attached_host,
attachment.mountpoint,
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'cluster_name': host.get('cluster_name'),
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host, new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
vol_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
vol_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
# here? Setting the volume back to it's original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
# NOTE(wanghao): We don't need to reserve volumes and gigabytes
# quota for retyping operation since they didn't changed, just
# reserve volume_type and type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything.
# Use the admin contex to be able to access volume extra_specs
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context.elevated(), volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host or cluster (depending
# if it's the volume is in a clustered backend or not) is the same as
# the current. If it's not don't call the driver.retype method,
# otherwise drivers that implement retype may report success, but it's
# invalid in the case of a migrate.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
self._is_our_backend(host['host'], host.get('cluster_name'))):
try:
new_type = volume_types.get_volume_type(context.elevated(),
new_type_id)
with volume.obj_as_admin():
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self._set_replication_status(diff, model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = vol_utils.is_replicated_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_my_resources(self, ctxt, ovo_class_list):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters)
def _get_my_volumes(self, ctxt):
return self._get_my_resources(ctxt, objects.VolumeList)
def _get_my_snapshots(self, ctxt):
return self._get_my_resources(ctxt, objects.SnapshotList)
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to uninitialized driver."))
cinder_volumes = self._get_my_volumes(ctxt)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableVolumeList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to driver error."))
return driver_entries
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
return self._create_group(context, group, False)
def create_group(self, context, group):
"""Creates the group."""
return self._create_group(context, group)
def _create_group(self, context, group, is_generic_group=True):
context = context.elevated()
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(group)
status = fields.GroupStatus.AVAILABLE
model_update = None
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.start")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Group %s: creating"), group.name)
if is_generic_group:
try:
model_update = self.driver.create_group(context,
group)
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update = self._create_group_generic(context,
group)
else:
cg, __ = self._convert_group_to_cg(group, [])
model_update = self.driver.create_consistencygroup(
context, cg)
else:
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error(_LE("Group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Group %s: created successfully"),
group.name)
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.end")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create group completed successfully."),
resource={'type': 'group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_group_from_src(self, context, group,
group_snapshot=None, source_group=None):
"""Creates the group from source.
The source can be a group snapshot or a source group.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
if group_snapshot:
try:
# Check if group_snapshot still exists
group_snapshot = objects.GroupSnapshot.get_by_id(
context, group_snapshot.id)
except exception.GroupSnapshotNotFound:
LOG.error(_LE("Create group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': group_snapshot.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("snapshot-%s") % group_snapshot.id
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_GROUP_SRC_SNAP_STATUS):
msg = (_("Cannot create group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
raise exception.InvalidGroup(reason=msg)
if source_group:
try:
source_group = objects.Group.get_by_id(
context, source_group.id)
except exception.GroupNotFound:
LOG.error(_LE("Create group "
"from source group-%(group)s failed: "
"GroupNotFound."),
{'group': source_group.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("group-%s") % source_group.id
source_vols = objects.VolumeList.get_all_by_generic_group(
context, source_group.id)
for source_vol in source_vols:
if (source_vol.status not in
VALID_CREATE_GROUP_SRC_GROUP_STATUS):
msg = (_("Cannot create group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol.id,
'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
raise exception.InvalidGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if group_snapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_group and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_group_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.create_group_from_src(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, volumes_model_update = (
self._create_group_from_src_generic(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
cgsnapshot, sorted_snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, sorted_snapshots, context))
source_cg, sorted_source_vols = (
self._convert_group_to_cg(source_group,
sorted_source_vols))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, cg, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
self._remove_consistencygroup_id_from_volumes(volumes)
self._remove_consistencygroup_id_from_volumes(
sorted_source_vols)
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
# Update volume status to 'error' as well.
self._remove_consistencygroup_id_from_volumes(volumes)
for vol in volumes:
vol.status = 'error'
vol.save()
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_group_usage(
context, group, "create.end")
LOG.info(_LI("Create group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
for vol in volumes:
try:
if snapshots:
for snapshot in snapshots:
if vol.snapshot_id == snapshot.id:
self.driver.create_volume_from_snapshot(
vol, snapshot)
break
except Exception:
raise
try:
if source_vols:
for source_vol in source_vols:
if vol.source_volid == source_vol.id:
self.driver.create_cloned_volume(vol, source_vol)
break
except Exception:
raise
return None, None
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
source_volid = vol.get('source_volid')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
if source_volid:
source_vol = objects.Volume.get_by_id(context, source_volid)
if source_vol.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context, source_volid, vol['id'])
if source_vol.multiattach:
update['multiattach'] = True
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_group(context, group.id)
for volume in volumes:
if (volume.attach_status ==
fields.VolumeAttachStatus.ATTACHED):
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.save()
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
self.db.volume_destroy(context, volume.id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume.size
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def delete_group(self, context, group):
"""Deletes group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
if vol_obj.attach_status == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=vol_obj.id)
self._check_is_our_resource(vol_obj)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, cg,
volumes))
self._remove_consistencygroup_id_from_volumes(volumes)
if volumes_model_update:
for update in volumes_model_update:
# If we failed to delete a volume, make sure the
# status for the group is set to error as well
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
self._remove_consistencygroup_id_from_volumes(volumes)
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
# Get reservations for group
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
for vol in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, vol.id)
vol.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info(_LI("Delete group "
"completed successfully."),
resource={'type': 'group',
'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
# Populate consistencygroup object
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
"""Creates a group."""
# A group entry is already created in db. Just returns a status here.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ovo.id,
'group_id': group.id,
'status': add_vol_ovo.status,
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ovo)
add_volumes_ref.append(add_vol_ovo)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
group.update_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates group.
Update group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol_ref.id},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
msg = (_("Cannot add volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ref)
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol_ref.id},
resource={'type': 'group',
'id': group.id})
raise
if (remove_vol_ref.status not in
VALID_REMOVE_VOL_FROM_GROUP_STATUS):
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref.id,
'group_id': group.id,
'status': remove_vol_ref.status,
'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
else:
cg, remove_volumes_ref = self._convert_group_to_cg(
group, remove_volumes_ref)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
if add_volumes_update:
self.db.volumes_update(context, add_volumes_update)
if remove_volumes_update:
self.db.volumes_update(context, remove_volumes_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
group.status = 'available'
group.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info(_LI("Update group completed successfully."),
resource={'type': 'group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def create_group_snapshot(self, context, group_snapshot):
"""Creates the group_snapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group_snapshot.group_type_id != cgsnap_type['id']:
model_update, snapshots_model_update = (
self._create_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap_id = snap_model.pop('id')
snap_obj = objects.Snapshot.get_by_id(context, snap_id)
snap_obj.update(snap_model)
snap_obj.save()
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot.volume_id
snapshot_id = snapshot.id
vol_obj = objects.Volume.get_by_id(context, volume_id)
if vol_obj.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
group_snapshot.status = 'available'
group_snapshot.save()
LOG.info(_LI("group_snapshot %s: created successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.end")
return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.create_snapshot(snapshot)
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = 'deleted'
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def delete_group_snapshot(self, context, group_snapshot):
"""Deletes group_snapshot."""
caller_context = context
context = context.elevated()
project_id = group_snapshot.project_id
LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.delete_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group_snapshot.group_type_id != cgsnap_type['id']:
model_update, snapshots_model_update = (
self._delete_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap_model.pop('id')
snap.update(snap_model)
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
self._remove_cgsnapshot_id_from_snapshots(snapshots)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot.id)
snapshot.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
group_snapshot.destroy()
LOG.info(_LI("group_snapshot %s: deleted successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(context, group_snapshot,
"delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 and a/a method
def failover(self, context, secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to failover
to one of it's secondary replication targets. host=None is
an acceptable input, and leaves it to the driver to failover
to the only configured target, or to choose a target on it's
own. All of the hosts volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
updates = {}
repl_status = fields.ReplicationStatus
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
volumes = self._get_my_volumes(context)
exception_encountered = True
try:
# For non clustered we can call v2.1 failover_host, but for
# clustered we call a/a failover method. We know a/a method
# exists because BaseVD class wouldn't have started if it didn't.
failover = getattr(self.driver,
'failover' if service.is_clustered
else 'failover_host')
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
active_backend_id, volume_update_list = failover(
context,
volumes,
secondary_id=secondary_backend_id)
exception_encountered = False
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
updates['replication_status'] = repl_status.FAILOVER_ERROR
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status: Status should be failed over if
# we were failing back or if we were failing over from one
# secondary to another secondary. In both cases active_backend_id
# will be set.
if service.active_backend_id:
updates['replication_status'] = repl_status.FAILED_OVER
else:
updates['replication_status'] = repl_status.ENABLED
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
updates.update(disabled=True,
replication_status=repl_status.FAILOVER_ERROR)
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
self.finish_failover(context, service, updates)
return
if secondary_backend_id == "default":
updates['replication_status'] = repl_status.ENABLED
updates['active_backend_id'] = ''
updates['disabled'] = service.frozen
updates['disabled_reason'] = 'frozen' if service.frozen else ''
else:
updates['replication_status'] = repl_status.FAILED_OVER
updates['active_backend_id'] = active_backend_id
updates['disabled'] = True
updates['disabled_reason'] = 'failed-over'
self.finish_failover(context, service, updates)
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
# TODO(geguileo): In P - remove this
failover_host = failover
def finish_failover(self, context, service, updates):
"""Completion of the failover locally or via RPC."""
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
# races with services that may be starting..
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
"""Finalize failover of this backend.
When a service is clustered and replicated the failover has 2 stages,
one that does the failover of the volumes and another that finalizes
the failover of the services themselves.
This method takes care of the last part and is called from the service
doing the failover of the volumes after finished processing the
volumes.
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to uninitialized driver."))
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to driver error."))
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup, want_objects=False):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
# TODO(sborkows): from_primitive method will be removed in O, so there
# is a need to clean here then.
return (objects.BackupDeviceInfo.from_primitive(backup_device_dict,
ctxt)
if want_objects else backup_device_dict)
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
def _connection_create(self, ctxt, volume, attachment, connector):
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(ctxt.elevated(),
volume, connector)
except exception.CinderException as ex:
err_msg = (_("Create export for volume failed (%s).") % ex.msg)
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(ctxt.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(ctxt, volume, conn_info)
# NOTE(jdg): Get rid of the nested dict (data key)
conn_data = conn_info.pop('data', {})
connection_info = conn_data.copy()
connection_info.update(conn_info)
values = {'volume_id': volume.id,
'attach_status': 'attaching', }
self.db.volume_attachment_update(ctxt, attachment.id, values)
self.db.attachment_specs_update_or_create(
ctxt,
attachment.id,
connector)
connection_info['attachment_id'] = attachment.id
return connection_info
def attachment_update(self,
context,
vref,
connector,
attachment_id):
"""Update/Finalize an attachment.
This call updates a valid attachment record to associate with a volume
and provide the caller with the proper connection info. Note that
this call requires an `attachment_ref`. It's expected that prior to
this call that the volume and an attachment UUID has been reserved.
param: vref: Volume object to create attachment for
param: connector: Connector object to use for attachment creation
param: attachment_ref: ID of the attachment record to update
"""
mode = connector.get('mode', 'rw')
self._notify_about_volume_usage(context, vref, 'attach.start')
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
connection_info = self._connection_create(context,
vref,
attachment_ref,
connector)
# FIXME(jdg): get rid of this admin_meta option here, the only thing
# it does is enforce that a volume is R/O, that should be done via a
# type and not *more* metadata
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(),
attachment_ref.volume_id,
{'attached_mode': mode}, False)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, vref.id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=vref.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=vref.id)
try:
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
vref,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'))
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_ref.id,
{'attach_status': 'error_attaching'})
self.db.volume_attached(context.elevated(),
attachment_ref.id,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'),
mode)
vref.refresh()
self._notify_about_volume_usage(context, vref, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=vref)
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
return connection_info
def _connection_terminate(self, context, volume,
attachment, force=False):
"""Remove a volume connection, but leave attachment."""
utils.require_driver_initialized(self.driver)
# TODO(jdg): Add an object method to cover this
connector = self.db.attachment_specs_get(
context,
attachment.id)
try:
shared_connections = self.driver.terminate_connection(volume,
connector,
force=force)
if not isinstance(shared_connections, bool):
shared_connections = False
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
# NOTE(jdg): Return True/False if there are other outstanding
# attachments that share this connection. If True should signify
# caller to preserve the actual host connection (work should be
# done in the brick connector as it has the knowledge of what's
# going on here.
return shared_connections
def attachment_delete(self, context, attachment_id, vref):
"""Delete/Detach the specified attachment.
Notifies the backend device that we're detaching the specified
attachment instance.
param: vref: Volume object associated with the attachment
param: attachment: Attachment reference object to remove
NOTE if the attachment reference is None, we remove all existing
attachments for the specified volume object.
"""
has_shared_connection = False
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
if self._do_attachment_delete(context, vref, attachment):
has_shared_connection = True
else:
has_shared_connection = (
self._do_attachment_delete(context, vref, attachment_ref))
return has_shared_connection
def _do_attachment_delete(self, context, vref, attachment):
utils.require_driver_initialized(self.driver)
self._notify_about_volume_usage(context, vref, "detach.start")
has_shared_connection = self._connection_terminate(context,
vref,
attachment)
self.driver.detach_volume(context, vref, attachment)
try:
LOG.debug('Deleting attachment %(attachment_id)s.',
{'attachment_id': attachment.id},
resource=vref)
self.driver.detach_volume(context, vref, attachment)
self.driver.remove_export(context.elevated(), vref)
except Exception:
# FIXME(jdg): Obviously our volume object is going to need some
# changes to deal with multi-attach and figuring out how to
# represent a single failed attach out of multiple attachments
# TODO(jdg): object method here
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
else:
self.db.volume_detached(context.elevated(), vref.id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(),
vref.id,
'attached_mode')
self._notify_about_volume_usage(context, vref, "detach.end")
return has_shared_connection
def is_volume_trusted(self, ctxt, volume_id):
volume = self.db.api.volume_get(ctxt, volume_id)
verify_trust = False
asset_tags = 'None'
host = ''
for metadata in volume.volume_metadata:
if metadata.key == 'trust':
host = volume.host.split("@")[0]
verify_trust = True
elif metadata.key == 'asset_tags':
asset_tags = metadata.value
if verify_trust:
return self.asset_tag_filter.is_trusted(host, asset_tags)
return None | ge0rgi/cinder | cinder/volume/manager.py | Python | apache-2.0 | 211,218 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import types_of_service
class as_external_lsa(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Contents of the AS External LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service")
_yang_name = "as-external-lsa"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__types_of_service = YANGDynClass(
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"as-external-lsa",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
YANG Description: State parameters for the AS external LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for the AS external LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_types_of_service(self):
"""
Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
YANG Description: Breakdown of External LSA contents specifying multiple
TOS values
"""
return self.__types_of_service
def _set_types_of_service(self, v, load=False):
"""
Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_types_of_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_types_of_service() directly.
YANG Description: Breakdown of External LSA contents specifying multiple
TOS values
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """types_of_service must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__types_of_service = t
if hasattr(self, "_set"):
self._set()
def _unset_types_of_service(self):
self.__types_of_service = YANGDynClass(
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
types_of_service = __builtin__.property(_get_types_of_service)
_pyangbind_elements = OrderedDict(
[("state", state), ("types_of_service", types_of_service)]
)
from . import state
from . import types_of_service
class as_external_lsa(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Contents of the AS External LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service")
_yang_name = "as-external-lsa"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__types_of_service = YANGDynClass(
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"as-external-lsa",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
YANG Description: State parameters for the AS external LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for the AS external LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_types_of_service(self):
"""
Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
YANG Description: Breakdown of External LSA contents specifying multiple
TOS values
"""
return self.__types_of_service
def _set_types_of_service(self, v, load=False):
"""
Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_types_of_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_types_of_service() directly.
YANG Description: Breakdown of External LSA contents specifying multiple
TOS values
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """types_of_service must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__types_of_service = t
if hasattr(self, "_set"):
self._set()
def _unset_types_of_service(self):
self.__types_of_service = YANGDynClass(
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
types_of_service = __builtin__.property(_get_types_of_service)
_pyangbind_elements = OrderedDict(
[("state", state), ("types_of_service", types_of_service)]
)
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/__init__.py | Python | apache-2.0 | 19,446 |
__author__ = 'thatcher'
from django.contrib import admin
# from django.contrib.auth.models import User
# from django.contrib.auth.admin import UserAdmin
# from django.contrib.sessions.
from django.contrib.sessions.models import Session
from .models import *
from base.forms import *
def images_thubmnail(self):
return '<img style="max-height: 80px; width: auto;" src="{}" alt="{}" >'.format(self.uri(), self.alt)
# return self.uri()
images_thubmnail.short_description = 'Thumbnail'
images_thubmnail.allow_tags = True
class TeamMemberAdmin(admin.ModelAdmin):
model = TeamMember
list_display = ['full_name', 'sort_weight', 'show_as_team']
admin.site.register(TeamMember, TeamMemberAdmin)
class NewsItemAdmin(admin.ModelAdmin):
model = NewsItem
list_display = ['id', 'title', 'publication_date', 'show', 'author']
admin.site.register(NewsItem, NewsItemAdmin)
class EventAdmin(admin.ModelAdmin):
model = Event
list_display = ['title', 'location', 'date_and_time']
admin.site.register(Event, EventAdmin)
class PostAdmin(admin.ModelAdmin):
model = GenericPost
list_display = ['title', 'category', 'publication_date']
admin.site.register(GenericPost, PostAdmin)
class CategoryAdmin(admin.ModelAdmin):
model = PostCategory
list_display = ['name', 'added_date']
admin.site.register(PostCategory, CategoryAdmin)
class ImageAdmin(admin.ModelAdmin):
model = Image
list_display = [images_thubmnail, 'alt', 'image_caption', 'image', ]
admin.site.register(Image, ImageAdmin)
class TagAdmin(admin.ModelAdmin):
model = Tag
list_display = ['name', 'added_date']
admin.site.register(Tag, TagAdmin)
| ZmG/trywsk | base/admin.py | Python | apache-2.0 | 1,654 |
#!/usr/bin/python
import json
import time
import parcon
import operator
import pprint
import os
import sys
import getopt
import re
import optparse
import string
import hashlib
import parse_objc as parser
import sign
#### tool version:
VERSION = parser.VERSION
VERSION_STR = sign.source_file_signature(__file__, VERSION)
#### shortcuts:
ID = parser.KEY_ID
pretty_json = parser.pretty_json
pretty_pprint = parser.pretty_pprint
log = parser.log
log_info = parser.log_info
################## Templates ############################
#########################################################
####
notifier_initializer_declaration_event_blocks_template = """
/**
Initialize an instance of the ${notifier_name} with the given event blocks (can be nil).
IMPORTANT:
1. to avoid retain cycles, the addedFirst/removedLast blocks should not reference '__strong self'. '__weak self' is fine.
2. rule of thumb: in case one of the addedFirst/removedLast blocks is provided, chances are the other block is needed as well.
@param addedFirst a block to be invoked after the first subscription has been added
@param removedLast a block to be invoked after the last subscription has been removed
*/
- (instancetype)initWithFirstSubscriptionAdded:(${notifier_name}FirstSubscriptionAdded)addedFirst
lastSubscriptionRemoved:(${notifier_name}LastSubscriptionRemoved)removedLast
""".strip()
####
notifier_initializer_declaration_template = """
/**
Initialize an instance of the ${notifier_name}
*/
- (instancetype)init
""".strip()
####
notifier_initializer_implementation_event_blocks_template = """
{
self = [super init];
if (self) {
_baseImplementation = [[WNNotifierBase alloc] initAtomic:${is_notifier_atomic} /* atomic */
firstSubscriptionBlock:addedFirst
lastSubscriptionBlock:removedLast];
}
return self;
}
- (instancetype)init
{
NSAssert(NO,
@"ERROR: please use: initWithFirstSubscriptionAdded:lastSubscriptionRemoved: to init this object");
return nil;
}
""".strip()
####
notifier_initializer_implementation_template = """
{
self = [super init];
if (self) {
_baseImplementation = [[WNNotifierBase alloc] initAtomic:${is_notifier_atomic}
firstSubscriptionBlock:nil
lastSubscriptionBlock:nil];
}
return self;
}
""".strip()
####
enumerator_typedef_template = """
typedef void (^${notifier_name}Visitor)(${notifier_name}Subcription* subscription)
""".strip()
####
first_subscription_added_typedef_template = """
typedef void (^${notifier_name}FirstSubscriptionAdded)(${notifier_name}Subcription* subscription)
""".strip()
####
last_subscription_removed_typedef_template = """
typedef void (^${notifier_name}LastSubscriptionRemoved)(${notifier_name}Subcription* subscription)
""".strip()
####
event_processor_typedef_template = """
typedef void (^${notifier_name}EventProcessor)(SEL selector, NSDictionary* arguments)
""".strip()
####
event_processor_property_template = """
/**
a block to process the notified events as a sequence of (SEL, NSDictionary* arguments) tuples.
a perfect use case for this feature is a file / network logger of events.
IMPORTANT: 1. even though this is a 'readwrite' property,
it's unadvised to write this property more than once.
2. to avoid a retain cycle, the block should avoid
referencing '__strong self', and prefer '__weak self' instead.
*/
@property (copy, readwrite) ${notifier_name}EventProcessor eventProcessor;
""".strip()
###
notifier_interface_template = """
@interface ${notifier_name} : NSObject <${listener_name}>
${notifier_initializer_declaration};
/**
Register the given subscription object ${listener_name} to be notified.
The notifications will be delivered to subscription->listener
for the lifecycle of the provided subscription object.
IMPORTANT: This API is NOT idempotent.
@param subscription - subscription object to be added.
*/
-(void)addSubscription:(${notifier_name}Subcription *)subscription;
/**
Unregister the given subscription object ${listener_name} from being notified.
@param subscription - subscription object to be removed
*/
-(void)removeSubscription:(${notifier_name}Subcription *)subscription;
${enumerator_declaration}
${event_processor_property}
@end
""".strip()
###
notifier_subscription_listener_context_property_template = """
@property (atomic, readonly, ${listener_context_ref}) id listenerContext
""".strip()
###
notifier_subscription_initializer_declaration_with_context_template = """
- (instancetype)initWithListener:(id <${listener_name}>)listener
listenerQueue:(dispatch_queue_t)listenerQueue
listenerContext:(id)listenerContext
""".strip()
###
notifier_subscription_initializer_declaration_no_context_template = """
- (instancetype)initWithListener:(id <${listener_name}>)listener
listenerQueue:(dispatch_queue_t)listenerQueue
""".strip()
###
notifier_subscription_interface_template = """
@interface ${notifier_name}Subcription : NSObject
${notifier_subscription_initializer_declaration}
@property (atomic, readonly, ${listener_ref}) id <${listener_name}> listener;
@property (atomic, readonly, strong) dispatch_queue_t listenerQueue;
${notifier_subscription_listener_context_property}
@end
"""
###
notifier_subscription_implementation_template = """
@implementation ${notifier_name}Subcription
${notifier_subscription_initializer_declaration}
{
self = [super init];
if (self) {
_listener = listener;
_listenerQueue = listenerQueue;
${notifier_subscription_implementation_extension}
}
return self;
}
@end
""".strip()
####
typedefs_template = """
${enumerator_typedef}${first_subscription_added_typedef}${last_subscription_removed_typedef}
${event_processor_typedef}
""".strip()
####
documentation_header_template = """
/**
Purpose:
========
Notifier for the ${listener_name} protocol defined in: ${listener_base_filename}
Annotations Used:
=================
${annotation_as_json_string}
*/
""".strip()
####
file_template_notifier_h = """
// @tool ${generated_by}
// @input_hash ${input_file_hash}
#import <Foundation/Foundation.h>
#import <dispatch/dispatch.h>
#import "${listener_name}.h"
${documentation_header}
@class ${notifier_name}Subcription;
${typedefs}${notifier_interface}${notifier_subscription_interface}
""".strip()
####
file_template_notifier_m = """
// @tool ${generated_by}
// @input_hash ${input_file_hash}
#if ! __has_feature(objc_arc)
#error This file must be compiled with ARC. Use -fobjc-arc flag (or convert project to ARC).
#endif
#import <dispatch/dispatch.h>
#import "${notifier_name}.h"
#import <WNNotifier/WNNotifierBase.h>
${notifier_subscription_implementation}
@implementation ${notifier_name} {
WNNotifierBase* _baseImplementation;
}
${notifier_initializer_declaration}
${notifier_initializer_implementation}
-(void)addSubscription:(${notifier_name}Subcription *)subscription
{
[_baseImplementation addSubscription:subscription];
}
-(void)removeSubscription:(${notifier_name}Subcription *)subscription
{
[_baseImplementation removeSubscription:subscription];
}
${enumerator_implementation}
${protocol_implementation}
@end
""".strip()
####
enumerator_implementation_template = """
-(void)enumerateSubscriptionsUsingBlock:(${notifier_name}Visitor)visitor
{
if (!visitor) {
return;
}
[_baseImplementation enumerateSubscriptionsUsingBlock:^bool(${notifier_name}Subcription * subscription) {
visitor(subscription);
return ((id<${listener_name}>)subscription.listener) != nil;
}];
}
""".strip()
####
enumerator_declaration_template = """
/**
Enumerate the current subscriptions collection with the given visitor block.
@param visitor - the block to be used to enumerate the current set of subscriptions
*/
-(void)enumerateSubscriptionsUsingBlock:(${notifier_name}Visitor)visitor;
""".strip()
####
method_event_processor_implementation_template = """
if (_eventProcessor) {
_eventProcessor(_cmd,
@{${event_dictionary_content}
});
}
""".strip()
####
method_required_implementation_template = """
${mdeclaration}
{
${method_event_processor_implementation}
[_baseImplementation enumerateSubscriptionsUsingBlock:^(${notifier_name}Subcription * subscription) {
return WNNotifierBaseNotify(
subscription.listener,
subscription.listenerQueue,
^(id<${listener_name}> listener) {
[listener ${minvocation}];
});
}];
}
""".strip()
####
method_optional_implementation_template = """
${mdeclaration}
{
${method_event_processor_implementation}
[_baseImplementation enumerateSubscriptionsUsingBlock:^(${notifier_name}Subcription * subscription) {
return WNNotifierBaseNotify(
subscription.listener,
subscription.listenerQueue,
^(id<${listener_name}> listener) {
if ([listener respondsToSelector:@selector(${mselector})]) {
[listener ${minvocation}];
}
});
}];
}
""".strip()
####
def verify_annotation(annotation):
log_info("annotation, %s" % annotation)
if annotation["atomic"] not in (True , False):
raise Exception("atomic : can only be 'true' or 'false', not, %s" % annotation["atomic"])
if annotation["listener-ref"] not in ("weak" , "strong"):
raise Exception("listener-ref : can only be 'weak' or 'strong', not, %s" % annotation["listener-ref"])
if annotation["event-blocks"] not in (True, False):
raise Exception("event-blocks : can only be 'True' or 'False', not, %s" % annotation["event-blocks"])
if annotation["event-processor-block"] not in (True, False):
raise Exception("event-processor-block : can only be 'True' or 'False', not, %s" % annotation["event-processor-block"])
if annotation["enumerate"] not in (True, False):
raise Exception("enumerate : can only be 'True' or 'False', not, %s" % annotation["enumerate"])
if len(annotation["listener-context-keyword"]) > 0 and not annotation["listener-context-keyword"].isalpha():
raise Exception("listener-context-keyword : should be a all alpha word, not, %s" % annotation["listener-context-keyword"])
if annotation["listener-context-ref"] not in ("weak", "strong", ""):
raise Exception("listener-context-ref : can only be 'weak' or 'strong' or '', not, %s" % annotation["listener-context-ref"])
####
def gen_event_processor_implementation(annotation, method):
"""
generate an event dictionary for the given 'method'
"""
event_dictionary_content = []
for keyword in method['mkeywords']:
keyword_name = keyword["keyword"]
keyword_type = keyword["type"]
if keyword.has_key("arg") and keyword['arg']:
at = ""
if keyword_type["name"] in parser.primitive_type_names():
at = "@"
keyword_arg = "WNNotifierBaseConvertToNSNull(" + at + "(" + keyword["arg"] + ")" + ")"
else:
keyword_arg = "[NSNull null]"
event_dictionary_content.append(
string.Template("""@"${keyword_name}" : ${keyword_arg}, """)
.substitute(
{ "keyword_name" : keyword_name,
"keyword_arg" : keyword_arg}))
event_dictionary_content = "\n ".join(event_dictionary_content)
method_event_processor_implementation = string.Template(
method_event_processor_implementation_template).substitute(
event_dictionary_content=event_dictionary_content)
return method_event_processor_implementation
####
def gen_notifier_v2_for_protocol(options, filename, objects, prot_object):
parser.protocol_methods_update_decorations(prot_object)
output_dir, base_filename = os.path.split(filename)
listener_name = prot_object["name"]
notifier_name = listener_name + "Notifier"
if options.notifier_name:
notifier_name = options.notifier_name
# get the annotation:
annotation_default = {
"atomic" : False,
"listener-ref" : "weak",
"event-blocks" : False,
"enumerate" : False,
"listener-context-keyword" : "",
"listener-context-ref" : "",
"event-processor-block" : False
}
annotation = annotation_default.copy()
if "json" in prot_object["WNNotifierGenerate"]:
annotation.update(prot_object["WNNotifierGenerate"]["json"])
verify_annotation(annotation)
protocol_implementation = ""
methods = prot_object["methods"]
# build up the implementation, method by method
for method in methods:
# default template params:
template_params = {}
# override the argument for listener-context-keyword
override_arg_for_keywords = {}
keyword_name = annotation["listener-context-keyword"]
if len(keyword_name):
keyword_arg = "%s ? %s : subscription.listenerContext" % (
keyword_name,
keyword_name)
override_arg_for_keywords = {keyword_name : keyword_arg}
# build a declaration, invocation and a selector for this method:
(mdeclaration, minvocation, mselector) = parser.protocol_method_recostruct(
method,
override_arg_for_keywords)
# generate the event processor code:
method_event_processor_implementation = ""
if annotation["event-processor-block"]:
method_event_processor_implementation = \
gen_event_processor_implementation(annotation, method)
# function implementation:
template_string = method_required_implementation_template
if method["decoration"] == ["@optional"]:
template_string = method_optional_implementation_template
template_string = template_string.strip()
# template parameters:
template_params.update({
"listener_name" : listener_name,
"notifier_name" : notifier_name,
"mdeclaration" : mdeclaration,
"minvocation" : minvocation,
"mselector" : mselector,
"method_event_processor_implementation" : method_event_processor_implementation,
})
# method implementation:
method_implementation = parser.remove_empty_lines(
string.Template(template_string).substitute(template_params))
# keep going:
protocol_implementation += "\n\n" + method_implementation
# hash the input file:
input_file_hash = base_filename + ":" + sign.sign_data(open(filename, "r").read())
is_notifier_atomic = "NO"
if annotation["atomic"]:
is_notifier_atomic = "YES"
# requested ref types:
listener_ref = annotation["listener-ref"]
listener_context_ref = annotation["listener-context-ref"]
# embed the annotations into the generated file:
annotation_as_json_string = "WNNotifierGenerate(%s)" % pretty_json(annotation)
# basic params:
template_params.update({
"generated_by" : VERSION_STR,
"notifier_name" : notifier_name,
"listener_name" : listener_name,
"is_notifier_atomic" : is_notifier_atomic,
"listener_ref" : listener_ref,
"listener_context_ref" : listener_context_ref,
"annotation_as_json_string" : annotation_as_json_string,
"listener_base_filename" : base_filename
})
# enumerators:
enumerator_implementation = ""
enumerator_declaration = ""
enumerator_typedef = ""
if annotation["enumerate"]:
enumerator_implementation = string.Template(enumerator_implementation_template).substitute(template_params)
enumerator_declaration = string.Template(enumerator_declaration_template).substitute(template_params)
enumerator_typedef = string.Template(enumerator_typedef_template).substitute(template_params) + ";\n"
# event blocks:
notifier_initializer_declaration = ""
notifier_initializer_implementation = ""
declaration_template = notifier_initializer_declaration_template
implementation_template = notifier_initializer_implementation_template
first_subscription_added_typedef = ""
last_subscription_removed_typedef = ""
if annotation["event-blocks"]:
declaration_template = notifier_initializer_declaration_event_blocks_template
implementation_template = notifier_initializer_implementation_event_blocks_template
first_subscription_added_typedef = string.Template(first_subscription_added_typedef_template).substitute(template_params) + ";\n"
last_subscription_removed_typedef = string.Template(last_subscription_removed_typedef_template).substitute(template_params) + ";\n"
notifier_initializer_declaration = string.Template(declaration_template).substitute(template_params)
notifier_initializer_implementation = string.Template(implementation_template).substitute(template_params)
notifier_subscription_listener_context_property = ""
notifier_subscription_implementation_extension = ""
notifier_subscription_initializer_declaration = string.Template(
notifier_subscription_initializer_declaration_no_context_template).substitute(template_params) + ";"
if len(annotation["listener-context-ref"]):
notifier_subscription_listener_context_property = string.Template(
notifier_subscription_listener_context_property_template).substitute(template_params) + ";"
notifier_subscription_initializer_declaration = string.Template(
notifier_subscription_initializer_declaration_with_context_template).substitute(template_params) + ";"
notifier_subscription_implementation_extension = "_listenerContext = listenerContext;"
# event processors:
event_processor_typedef = ""
event_processor_property = ""
if annotation["event-processor-block"]:
event_processor_typedef = string.Template(event_processor_typedef_template).substitute(template_params) + ";"
event_processor_property = string.Template(event_processor_property_template).substitute(template_params)
# populate the templates, and write the files:
template_params.update({
"protocol_implementation" : protocol_implementation,
"input_file_hash" : input_file_hash,
# enumerator:
"enumerator_implementation" : enumerator_implementation,
"enumerator_declaration" : enumerator_declaration,
"enumerator_typedef" : enumerator_typedef,
# initializer:
"notifier_initializer_declaration" : notifier_initializer_declaration,
"notifier_initializer_implementation" : notifier_initializer_implementation,
"first_subscription_added_typedef" : first_subscription_added_typedef,
"last_subscription_removed_typedef" : last_subscription_removed_typedef,
# listener context:
"notifier_subscription_listener_context_property" : notifier_subscription_listener_context_property,
"notifier_subscription_initializer_declaration" : notifier_subscription_initializer_declaration,
"notifier_subscription_implementation_extension" : notifier_subscription_implementation_extension,
# event processor:
"event_processor_typedef" : event_processor_typedef,
"event_processor_property" : event_processor_property,
})
# subscription object implementation:
notifier_subscription_implementation = parser.remove_empty_lines(
string.Template(
notifier_subscription_implementation_template).substitute(
template_params))
template_params.update({
"notifier_subscription_implementation" : notifier_subscription_implementation,
})
# subscription object interface:
notifier_subscription_interface = string.Template(
notifier_subscription_interface_template).substitute(
template_params)
# notifier interface:
notifier_interface = string.Template(
notifier_interface_template).substitute(
template_params)
# typedef section:
typedefs = string.Template(
typedefs_template).substitute(
template_params)
# doc header:
documentation_header = string.Template(
documentation_header_template).substitute(
template_params)
# clear out some spaces:
notifier_subscription_interface = parser.remove_empty_lines(notifier_subscription_interface) + "\n\n"
notifier_interface = parser.remove_empty_lines(notifier_interface) + "\n\n"
typedefs = parser.remove_empty_lines(typedefs)
if len(typedefs):
typedefs += "\n\n"
# extend the template params:
template_params.update({
"notifier_subscription_interface" : notifier_subscription_interface,
"notifier_interface" : notifier_interface,
"typedefs" : typedefs,
"documentation_header" : documentation_header
})
# write the files:
parser.write_class_files( output_dir,
notifier_name,
string.Template(file_template_notifier_h).substitute(template_params),
string.Template(file_template_notifier_m).substitute(template_params))
####
def get_objects_with_id(objects, id):
ret = []
for object in objects:
if object[ID] == id:
ret.append(object)
return ret
####
def gen_notifier_v2(options, filename):
in_data, in_lines = parser.read_file(filename)
objects = parser.process_input_lines(options, in_lines)
objects = objects["objects"]
log_info("objects = \n%s" % pretty_pprint(objects))
protocols = get_objects_with_id(objects, "protocol")
# has to contain a protocol:
if len(protocols) <= 0 or len(protocols) > 1:
raise Exception("file, %s, doesn't cotain a protocol" % filename)
for prot_object in protocols:
log_info("prot = %s" % pretty_json(prot_object))
gen_notifier_v2_for_protocol(options, filename, objects, prot_object)
####
def main():
oparser = optparse.OptionParser(usage="\n %prog <options> <protocol h file1>")
oparser.add_option("", "--version",
action="store_true",
help="print version number",
dest="version", default=False)
oparser.add_option("", "--verbose",
action="store_true",
help="print more information while testing",
dest="verbose",
default=False)
oparser.add_option("",
"--notifier_name",
help="set (override) the name of the notifier to be <notifier_name> (default <ProtocolName>Notifier)",
dest="notifier_name",
default=None)
(options, filenames) = oparser.parse_args()
parser.log_info_enabled = options.verbose
parser.log_info("generating notifier...")
log_info("filenames = %s" % str(filenames))
log_info("options = %s" % str(options))
if options.version:
print(VERSION_STR)
sys.exit(0)
if options.notifier_name and len(filenames) > 1:
raise Exception("--notifier_name can not be set when more than one fiename is specified")
for filename in filenames:
log("generating "+ filename)
gen_notifier_v2(options, filename)
####
if __name__ == "__main__":
main()
| korovkin/WNNotifier | notifier/gen_notifier.py | Python | apache-2.0 | 23,166 |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export global feature tensorflow inference model.
This model includes image pyramids for multi-scale processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow as tf
from delf.python.training.model import delf_model
from delf.python.training.model import export_model_utils
FLAGS = flags.FLAGS
flags.DEFINE_string('ckpt_path', '/tmp/delf-logdir/delf-weights',
'Path to saved checkpoint.')
flags.DEFINE_string('export_path', None, 'Path where model will be exported.')
flags.DEFINE_list(
'input_scales_list', None,
'Optional input image scales to use. If None (default), an input end-point '
'"input_scales" is added for the exported model. If not None, the '
'specified list of floats will be hard-coded as the desired input scales.')
flags.DEFINE_enum(
'multi_scale_pool_type', 'None', ['None', 'average', 'sum'],
"If 'None' (default), the model is exported with an output end-point "
"'global_descriptors', where the global descriptor for each scale is "
"returned separately. If not 'None', the global descriptor of each scale is"
' pooled and a 1D global descriptor is returned, with output end-point '
"'global_descriptor'.")
flags.DEFINE_boolean('normalize_global_descriptor', False,
'If True, L2-normalizes global descriptor.')
class _ExtractModule(tf.Module):
"""Helper module to build and save global feature model."""
def __init__(self,
multi_scale_pool_type='None',
normalize_global_descriptor=False,
input_scales_tensor=None):
"""Initialization of global feature model.
Args:
multi_scale_pool_type: Type of multi-scale pooling to perform.
normalize_global_descriptor: Whether to L2-normalize global descriptor.
input_scales_tensor: If None, the exported function to be used should be
ExtractFeatures, where an input end-point "input_scales" is added for
the exported model. If not None, the specified 1D tensor of floats will
be hard-coded as the desired input scales, in conjunction with
ExtractFeaturesFixedScales.
"""
self._multi_scale_pool_type = multi_scale_pool_type
self._normalize_global_descriptor = normalize_global_descriptor
if input_scales_tensor is None:
self._input_scales_tensor = []
else:
self._input_scales_tensor = input_scales_tensor
# Setup the DELF model for extraction.
self._model = delf_model.Delf(block3_strides=False, name='DELF')
def LoadWeights(self, checkpoint_path):
self._model.load_weights(checkpoint_path)
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image'),
tf.TensorSpec(shape=[None], dtype=tf.float32, name='input_scales'),
tf.TensorSpec(
shape=[None], dtype=tf.int32, name='input_global_scales_ind')
])
def ExtractFeatures(self, input_image, input_scales, input_global_scales_ind):
extracted_features = export_model_utils.ExtractGlobalFeatures(
input_image,
input_scales,
input_global_scales_ind,
lambda x: self._model.backbone.build_call(x, training=False),
multi_scale_pool_type=self._multi_scale_pool_type,
normalize_global_descriptor=self._normalize_global_descriptor)
named_output_tensors = {}
if self._multi_scale_pool_type == 'None':
named_output_tensors['global_descriptors'] = tf.identity(
extracted_features, name='global_descriptors')
else:
named_output_tensors['global_descriptor'] = tf.identity(
extracted_features, name='global_descriptor')
return named_output_tensors
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 3], dtype=tf.uint8, name='input_image')
])
def ExtractFeaturesFixedScales(self, input_image):
return self.ExtractFeatures(input_image, self._input_scales_tensor,
tf.range(tf.size(self._input_scales_tensor)))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export_path = FLAGS.export_path
if os.path.exists(export_path):
raise ValueError('export_path %s already exists.' % export_path)
if FLAGS.input_scales_list is None:
input_scales_tensor = None
else:
input_scales_tensor = tf.constant(
[float(s) for s in FLAGS.input_scales_list],
dtype=tf.float32,
shape=[len(FLAGS.input_scales_list)],
name='input_scales')
module = _ExtractModule(FLAGS.multi_scale_pool_type,
FLAGS.normalize_global_descriptor,
input_scales_tensor)
# Load the weights.
checkpoint_path = FLAGS.ckpt_path
module.LoadWeights(checkpoint_path)
print('Checkpoint loaded from ', checkpoint_path)
# Save the module
if FLAGS.input_scales_list is None:
served_function = module.ExtractFeatures
else:
served_function = module.ExtractFeaturesFixedScales
tf.saved_model.save(
module, export_path, signatures={'serving_default': served_function})
if __name__ == '__main__':
app.run(main)
| tombstone/models | research/delf/delf/python/training/model/export_global_model.py | Python | apache-2.0 | 5,972 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement # use the features of python 3
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
class LRUCache(collections.MutableMapping): # ABCs for read-only and mutable mappings.
"""This class is not thread safe"""
def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
self.timeout = timeout # the cache expire time
self.close_callback = close_callback # called when value will be swept from cache
self._store = {} # dict<key, value>: store cache data key value
self._time_to_keys = collections.defaultdict(list) # defaultdict<time, list<key>>
# defaultdict: dict subclass that calls a factory function to supply missing values
self._keys_to_last_time = {} # dict<key, time> stores the last time of one key visited.
self._last_visits = collections.deque() # deque<time> store all the time once key is visited.
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
# O(1)
t = time.time()
self._keys_to_last_time[key] = t
self._time_to_keys[t].append(key)
self._last_visits.append(t)
return self._store[key]
def __setitem__(self, key, value):
# O(1)
t = time.time()
self._keys_to_last_time[key] = t
self._store[key] = value
self._time_to_keys[t].append(key)
self._last_visits.append(t)
def __delitem__(self, key):
# O(1)
del self._store[key]
del self._keys_to_last_time[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def sweep(self):
# O(m)
now = time.time()
c = 0 # use to log how many keys has been swept.
while len(self._last_visits) > 0:
least = self._last_visits[0] # fetch the oldest time point
if now - least <= self.timeout: # the oldest time point hasn't expire
break
if self.close_callback is not None: # callback function has been set
for key in self._time_to_keys[least]: # fetch each key visited on the oldest time
if key in self._store: # finded the cache key
if now - self._keys_to_last_time[key] > self.timeout:
value = self._store[key] # get the key of the last time and check expire or yet.
self.close_callback(value) # call callback
for key in self._time_to_keys[least]:
self._last_visits.popleft() # can't understand and have error personally
# @Sunny: use popleft to remove oldest time point in last visits
if key in self._store:
if now - self._keys_to_last_time[key] > self.timeout:
del self._store[key]
del self._keys_to_last_time[key]
c += 1
del self._time_to_keys[least]
if c:
logging.debug('%d keys swept' % c)
def test():
c = LRUCache(timeout=0.3)
c['a'] = 1
assert c['a'] == 1
time.sleep(0.5)
c.sweep()
assert 'a' not in c
c['a'] = 2
c['b'] = 3
time.sleep(0.2)
c.sweep()
assert c['a'] == 2
assert c['b'] == 3
time.sleep(0.2)
c.sweep()
c['b']
time.sleep(0.2)
c.sweep()
assert 'a' not in c
assert c['b'] == 3
time.sleep(0.5)
c.sweep()
assert 'a' not in c
assert 'b' not in c
if __name__ == '__main__':
test()
| meowlab/shadowsocks-comment | shadowsocks/lru_cache.py | Python | apache-2.0 | 4,886 |
#!/usr/bin/env python
def run_test(n, m, power, bullet):
prev_dict = {}
cur_dict = {}
for i in xrange(n):
ri = n-1-i
for j in xrange(m):
if i == 0:
cur_dict[power[ri][j]] = power[ri][j]
else:
new_k = power[ri][j]
for k, v in prev_dict.items():
all_bullet = new_k + k - min(v, bullet[ri][j])
if cur_dict.has_key(all_bullet):
cur_dict[all_bullet] = min(new_k, cur_dict[all_bullet])
else:
cur_dict[all_bullet] = new_k
prev_dict = {}
for c, t in cur_dict.items():
small = True
for c1, t1 in cur_dict.items():
if c1 < c and t1 < t:
small = False
break
if small:
prev_dict[c] = t
# print "%s" % (prev_dict)
cur_dict = {}
smallest = None
for t in prev_dict.keys():
if smallest is None or t < smallest:
smallest = t
print smallest
return smallest
def mtest1():
n = 3
m = 3
power = [[1, 2, 3], [3, 2, 1], [3, 2, 1]]
bullet = [[1, 2, 3], [3, 2, 1], [1, 2, 3]]
run_test(n, m, power, bullet)
def mtest2():
n = 3
m = 2
power = [[1, 8], [6, 1], [4, 6]]
bullet = [[2, 1], [4, 1], [3, 1]]
run_test(n, m, power, bullet)
def mtest3():
n = 3
m = 3
power = [[3, 2, 5], [8, 9, 1], [4, 7, 6]]
bullet = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
run_test(n, m, power, bullet)
def mtest3():
n = 3
m = 2
power = [[5, 10], [50, 60], [20, 25]]
bullet = [[5, 50], [5, 20], [1, 1]]
run_test(n, m, power, bullet)
def manual_test():
mtest1()
mtest2()
mtest3()
if __name__ == "__main__":
manual_test()
| SwordYoung/cutprob | hackerrank/contest/w13/a-super-hero/a.py | Python | artistic-2.0 | 1,849 |
import ptypes, pecoff
from ptypes import *
from . import error, ldrtypes, rtltypes, umtypes, ketypes, Ntddk, heaptypes, sdkddkver
from .datatypes import *
class PEB_FREE_BLOCK(pstruct.type): pass
class PPEB_FREE_BLOCK(P(PEB_FREE_BLOCK)): pass
PEB_FREE_BLOCK._fields_ = [(PPEB_FREE_BLOCK, 'Next'), (ULONG, 'Size')]
class _Win32kCallbackTable(pstruct.type, versioned):
_fields_ = [
(PVOID, 'fnCOPYDATA'),
(PVOID, 'fnCOPYGLOBALDATA'),
(PVOID, 'fnDWORD'),
(PVOID, 'fnNCDESTROY'),
(PVOID, 'fnDWORDOPTINLPMSG'),
(PVOID, 'fnINOUTDRAG'),
(PVOID, 'fnGETTEXTLENGTHS'),
(PVOID, 'fnINCNTOUTSTRING'),
(PVOID, 'fnPOUTLPINT'),
(PVOID, 'fnINLPCOMPAREITEMSTRUCT'),
(PVOID, 'fnINLPCREATESTRUCT'),
(PVOID, 'fnINLPDELETEITEMSTRUCT'),
(PVOID, 'fnINLPDRAWITEMSTRUCT'),
(PVOID, 'fnPOPTINLPUINT'),
(PVOID, 'fnPOPTINLPUINT2'),
(PVOID, 'fnINLPMDICREATESTRUCT'),
(PVOID, 'fnINOUTLPMEASUREITEMSTRUCT'),
(PVOID, 'fnINLPWINDOWPOS'),
(PVOID, 'fnINOUTLPPOINT5'),
(PVOID, 'fnINOUTLPSCROLLINFO'),
(PVOID, 'fnINOUTLPRECT'),
(PVOID, 'fnINOUTNCCALCSIZE'),
(PVOID, 'fnINOUTLPPOINT5_'),
(PVOID, 'fnINPAINTCLIPBRD'),
(PVOID, 'fnINSIZECLIPBRD'),
(PVOID, 'fnINDESTROYCLIPBRD'),
(PVOID, 'fnINSTRING'),
(PVOID, 'fnINSTRINGNULL'),
(PVOID, 'fnINDEVICECHANGE'),
(PVOID, 'fnPOWERBROADCAST'),
(PVOID, 'fnINLPUAHDRAWMENU'),
(PVOID, 'fnOPTOUTLPDWORDOPTOUTLPDWORD'),
(PVOID, 'fnOPTOUTLPDWORDOPTOUTLPDWORD_'),
(PVOID, 'fnOUTDWORDINDWORD'),
(PVOID, 'fnOUTLPRECT'),
(PVOID, 'fnOUTSTRING'),
(PVOID, 'fnPOPTINLPUINT3'),
(PVOID, 'fnPOUTLPINT2'),
(PVOID, 'fnSENTDDEMSG'),
(PVOID, 'fnINOUTSTYLECHANGE'),
(PVOID, 'fnHkINDWORD'),
(PVOID, 'fnHkINLPCBTACTIVATESTRUCT'),
(PVOID, 'fnHkINLPCBTCREATESTRUCT'),
(PVOID, 'fnHkINLPDEBUGHOOKSTRUCT'),
(PVOID, 'fnHkINLPMOUSEHOOKSTRUCTEX'),
(PVOID, 'fnHkINLPKBDLLHOOKSTRUCT'),
(PVOID, 'fnHkINLPMSLLHOOKSTRUCT'),
(PVOID, 'fnHkINLPMSG'),
(PVOID, 'fnHkINLPRECT'),
(PVOID, 'fnHkOPTINLPEVENTMSG'),
(PVOID, 'xxxClientCallDelegateThread'),
(PVOID, 'ClientCallDummyCallback'),
(PVOID, 'fnKEYBOARDCORRECTIONCALLOUT'),
(PVOID, 'fnOUTLPCOMBOBOXINFO'),
(PVOID, 'fnINLPCOMPAREITEMSTRUCT2'),
(PVOID, 'xxxClientCallDevCallbackCapture'),
(PVOID, 'xxxClientCallDitThread'),
(PVOID, 'xxxClientEnableMMCSS'),
(PVOID, 'xxxClientUpdateDpi'),
(PVOID, 'xxxClientExpandStringW'),
(PVOID, 'ClientCopyDDEIn1'),
(PVOID, 'ClientCopyDDEIn2'),
(PVOID, 'ClientCopyDDEOut1'),
(PVOID, 'ClientCopyDDEOut2'),
(PVOID, 'ClientCopyImage'),
(PVOID, 'ClientEventCallback'),
(PVOID, 'ClientFindMnemChar'),
(PVOID, 'ClientFreeDDEHandle'),
(PVOID, 'ClientFreeLibrary'),
(PVOID, 'ClientGetCharsetInfo'),
(PVOID, 'ClientGetDDEFlags'),
(PVOID, 'ClientGetDDEHookData'),
(PVOID, 'ClientGetListboxString'),
(PVOID, 'ClientGetMessageMPH'),
(PVOID, 'ClientLoadImage'),
(PVOID, 'ClientLoadLibrary'),
(PVOID, 'ClientLoadMenu'),
(PVOID, 'ClientLoadLocalT1Fonts'),
(PVOID, 'ClientPSMTextOut'),
(PVOID, 'ClientLpkDrawTextEx'),
(PVOID, 'ClientExtTextOutW'),
(PVOID, 'ClientGetTextExtentPointW'),
(PVOID, 'ClientCharToWchar'),
(PVOID, 'ClientAddFontResourceW'),
(PVOID, 'ClientThreadSetup'),
(PVOID, 'ClientDeliverUserApc'),
(PVOID, 'ClientNoMemoryPopup'),
(PVOID, 'ClientMonitorEnumProc'),
(PVOID, 'ClientCallWinEventProc'),
(PVOID, 'ClientWaitMessageExMPH'),
(PVOID, 'ClientWOWGetProcModule'),
(PVOID, 'ClientWOWTask16SchedNotify'),
(PVOID, 'ClientImmLoadLayout'),
(PVOID, 'ClientImmProcessKey'),
(PVOID, 'fnIMECONTROL'),
(PVOID, 'fnINWPARAMDBCSCHAR'),
(PVOID, 'fnGETTEXTLENGTHS2'),
(PVOID, 'fnINLPKDRAWSWITCHWND'),
(PVOID, 'ClientLoadStringW'),
(PVOID, 'ClientLoadOLE'),
(PVOID, 'ClientRegisterDragDrop'),
(PVOID, 'ClientRevokeDragDrop'),
(PVOID, 'fnINOUTMENUGETOBJECT'),
(PVOID, 'ClientPrinterThunk'),
(PVOID, 'fnOUTLPCOMBOBOXINFO2'),
(PVOID, 'fnOUTLPSCROLLBARINFO'),
(PVOID, 'fnINLPUAHDRAWMENU2'),
(PVOID, 'fnINLPUAHDRAWMENUITEM'),
(PVOID, 'fnINLPUAHDRAWMENU3'),
(PVOID, 'fnINOUTLPUAHMEASUREMENUITEM'),
(PVOID, 'fnINLPUAHDRAWMENU4'),
(PVOID, 'fnOUTLPTITLEBARINFOEX'),
(PVOID, 'fnTOUCH'),
(PVOID, 'fnGESTURE'),
(PVOID, 'fnPOPTINLPUINT4'),
(PVOID, 'fnPOPTINLPUINT5'),
(PVOID, 'xxxClientCallDefaultInputHandler'),
(PVOID, 'fnEMPTY'),
(PVOID, 'ClientRimDevCallback'),
(PVOID, 'xxxClientCallMinTouchHitTestingCallback'),
(PVOID, 'ClientCallLocalMouseHooks'),
(PVOID, 'xxxClientBroadcastThemeChange'),
(PVOID, 'xxxClientCallDevCallbackSimple'),
(PVOID, 'xxxClientAllocWindowClassExtraBytes'),
(PVOID, 'xxxClientFreeWindowClassExtraBytes'),
(PVOID, 'fnGETWINDOWDATA'),
(PVOID, 'fnINOUTSTYLECHANGE2'),
(PVOID, 'fnHkINLPMOUSEHOOKSTRUCTEX2'),
]
class PEB(pstruct.type, versioned):
'''
0x0098 NT 3.51
0x0150 NT 4.0
0x01E8 Win2k
0x020C XP
0x0230 WS03
0x0238 Vista
0x0240 Win7_BETA
0x0248 Win6
0x0250 Win8
0x045C Win10
'''
class BitField(pbinary.flags):
_fields_ = [
(1, 'ImageUsesLargePages'),
(1, 'IsProtectedProcess'),
(1, 'IsLegacyProcess'),
(1, 'IsImageDynamicallyRelocated'),
(1, 'SkipPatchingUser32Forwarders'),
(1, 'SpareBits'),
]
class CrossProcessFlags(pbinary.flags):
_fields_ = [
(1, 'ProcessInJob'),
(1, 'ProcessInitializing'),
(1, 'ProcessUsingVEH'),
(1, 'ProcessUsingVCH'),
(1, 'ProcessUsingFTH'),
(27, 'ReservedBits0'),
]
class NtGlobalFlag(pbinary.flags):
def __init__(self, **attrs):
super(PEB.NtGlobalFlag, self).__init__(**attrs)
f = []
f.extend([
(1, 'FLG_STOP_ON_EXCEPTION'), # 0x00000001
(1, 'FLG_SHOW_LDR_SNAPS'), # 0x00000002
(1, 'FLG_DEBUG_INITIAL_COMMAND'), # 0x00000004
(1, 'FLG_STOP_ON_HUNG_GUI'), # 0x00000008
(1, 'FLG_HEAP_ENABLE_TAIL_CHECK'), # 0x00000010
(1, 'FLG_HEAP_ENABLE_FREE_CHECK'), # 0x00000020
(1, 'FLG_HEAP_VALIDATE_PARAMETERS'), # 0x00000040
(1, 'FLG_HEAP_VALIDATE_ALL'), # 0x00000080
(1, 'FLG_POOL_ENABLE_TAIL_CHECK'), # 0x00000100
(1, 'FLG_POOL_ENABLE_FREE_CHECK'), # 0x00000200
(1, 'FLG_POOL_ENABLE_TAGGING'), # 0x00000400
(1, 'FLG_HEAP_ENABLE_TAGGING'), # 0x00000800
(1, 'FLG_USER_STACK_TRACE_DB'), # 0x00001000
(1, 'FLG_KERNEL_STACK_TRACE_DB'), # 0x00002000
(1, 'FLG_MAINTAIN_OBJECT_TYPELIST'), # 0x00004000
(1, 'FLG_HEAP_ENABLE_TAG_BY_DLL'), # 0x00008000
(1, 'FLG_IGNORE_DEBUG_PRIV'), # 0x00010000
(1, 'FLG_ENABLE_CSRDEBUG'), # 0x00020000
(1, 'FLG_ENABLE_KDEBUG_SYMBOL_LOAD'), # 0x00040000
(1, 'FLG_DISABLE_PAGE_KERNEL_STACKS'), # 0x00080000
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_WINXP:
f.append((1, 'FLG_HEAP_ENABLE_CALL_TRACING')) #0x00100000
else:
f.append((1, 'FLG_ENABLE_SYSTEM_CRIT_BREAKS')) #0x00100000
f.extend([
(1, 'FLG_HEAP_DISABLE_COALESCING'), # 0x00200000
(1, 'FLG_ENABLE_CLOSE_EXCEPTIONS'), # 0x00400000
(1, 'FLG_ENABLE_EXCEPTION_LOGGING'), # 0x00800000
(1, 'FLG_ENABLE_HANDLE_TYPE_TAGGING'), # 0x01000000
(1, 'FLG_HEAP_PAGE_ALLOCS'), # 0x02000000
(1, 'FLG_DEBUG_INITIAL_COMMAND_EX'), # 0x04000000
])
f.append((1+1+1+1+1, 'FLG_RESERVED'))
self._fields_ = list(reversed(f))
def __repr__(self):
ofs = '[{:x}]'.format(self.getoffset())
names = '|'.join((k for k, v in self.items() if v))
return ' '.join([ofs, self.name(), names, '{!r}'.format(self.serialize())])
class TracingFlags(pbinary.flags):
_fields_ = [
(1, 'HeapTracingEnabled'),
(1, 'CritSecTracingEnabled'),
(1, 'LibLoaderTracingEnabled'),
(29, 'SpareTracingBits'),
]
def __init__(self, **attrs):
super(PEB, self).__init__(**attrs)
self._fields_ = f = []
aligned = dyn.align(8 if getattr(self, 'WIN64', False) else 4)
f.extend([
(UCHAR, 'InheritedAddressSpace'),
(UCHAR, 'ReadImageFileExecOptions'),
(UCHAR, 'BeingDebugged'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
f.append( (pbinary.littleendian(PEB.BitField), 'BitField') )
else:
raise error.NdkUnsupportedVersion(self)
f.append( (BOOLEAN, 'SpareBool') )
f.extend([
(aligned, 'align(Mutant)'),
(HANDLE, 'Mutant'),
(P(pecoff.Executable.File), 'ImageBaseAddress'),
(ldrtypes.PPEB_LDR_DATA, 'Ldr'),
(P(rtltypes.RTL_USER_PROCESS_PARAMETERS), 'ProcessParameters'),
(PVOID, 'SubSystemData'),
(P(heaptypes.HEAP), 'ProcessHeap'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN7:
f.extend([
(P(rtltypes.RTL_CRITICAL_SECTION), 'FastPebLock'),
(PVOID, 'AltThunkSListPtr'),
(PVOID, 'IFEOKey'),
(pbinary.littleendian(PEB.CrossProcessFlags), 'CrossProcessFlags'),
(aligned, 'align(UserSharedInfoPtr)'),
(P(_Win32kCallbackTable), 'UserSharedInfoPtr'),
(ULONG, 'SystemReserved'),
(ULONG, 'AtlThunkSListPtr32') if getattr(self, 'WIN64', False) else (ULONG, 'SpareUlong'),
(P(API_SET_MAP), 'ApiSetMap'),
])
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
raise error.NdkUnsupportedVersion(self)
f.extend([
(P(rtltypes.RTL_CRITICAL_SECTION), 'FastPebLock'),
(PVOID, 'AltThunkSListPtr'),
(PVOID, 'IFEOKey'),
(ULONG, 'CrossProcessFlags'),
(P(_Win32kCallbackTable), 'UserSharedInfoPtr'),
(ULONG, 'SystemReserved'),
(ULONG, 'SpareUlong'),
(PVOID, 'SparePebPtr0'),
])
else:
raise error.NdkUnsupportedVersion(self)
f.extend([
(P(rtltypes.RTL_CRITICAL_SECTION), 'FastPebLock'),
(PVOID, 'FastPebLockRoutine'),
(PVOID, 'FastPebUnlockRoutine'),
(ULONG, 'EnvironmentUpdateCount'),
(P(_Win32kCallbackTable), 'KernelCallbackTable'),
(PVOID, 'EventLogSection'),
(PVOID, 'EventLog'),
(PPEB_FREE_BLOCK, 'FreeList'),
])
f.extend([
(ULONG, 'TlsExpansionCounter'),
(aligned, 'align(TlsBitmap)'),
(PVOID, 'TlsBitmap'), # FIXME: Does TlsBitmapBits represent the number of bytes that are in use?
(dyn.clone(BitmapBitsUlong, _object_=ULONG, length=2), 'TlsBitmapBits'),
(PVOID, 'ReadOnlySharedMemoryBase'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
f.append((PVOID, 'HotpatchInformation'))
else:
f.append((PVOID, 'ReadOnlySharedMemoryHeap'))
f.extend([
(P(PVOID), 'ReadOnlyStaticServerData'),
(PVOID, 'AnsiCodePageData'),
(PVOID, 'OemCodePageData'),
(PVOID, 'UnicodeCaseTableData'),
(ULONG, 'NumberOfProcessors'),
(pbinary.littleendian(PEB.NtGlobalFlag), 'NtGlobalFlag'),
(dyn.align(8), 'Reserved'),
(LARGE_INTEGER, 'CriticalSectionTimeout'),
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'HeapSegmentReserve'),
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'HeapSegmentCommit'),
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'HeapDeCommitTotalFreeThreshold'),
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'HeapDeCommitFreeBlockThreshold'),
(ULONG, 'NumberOfHeaps'),
(ULONG, 'MaximumNumberOfHeaps'),
(lambda s: P(dyn.clone(heaptypes.ProcessHeapEntries, length=s['NumberOfHeaps'].li.int())), 'ProcessHeaps'),
# (P(win32k.GDI_HANDLE_TABLE), 'GdiSharedHandleTable'),
(PVOID, 'GdiSharedHandleTable'),
(PVOID, 'ProcessStarterHelper'),
(ULONG, 'GdiDCAttributeList'),
])
f.extend([
(aligned, 'align(LoaderLock)'),
(P(rtltypes.RTL_CRITICAL_SECTION), 'LoaderLock')
])
f.extend([
(ULONG, 'OSMajorVersion'),
(ULONG, 'OSMinorVersion'),
(USHORT, 'OSBuildNumber'),
(USHORT, 'OSCSDVersion'),
(ULONG, 'OSPlatformId'),
(ULONG, 'ImageSubSystem'),
(ULONG, 'ImageSubSystemMajorVersion'),
(ULONG, 'ImageSubSystemMinorVersion'),
(aligned, 'align(ActiveProcessAffinityMask)'),
(ULONG, 'ActiveProcessAffinityMask'),
(aligned, 'align(GdiHandleBuffer)'),
(dyn.array(ULONG, 0x3c if getattr(self, 'WIN64', False) else 0x22), 'GdiHandleBuffer'),
(PVOID, 'PostProcessInitRoutine'),
(PVOID, 'TlsExpansionBitmap'),
(dyn.clone(BitmapBitsUlong, _object_=ULONG, length=0x20), 'TlsExpansionBitmapBits'),
(ULONG, 'SessionId'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WINBLUE:
f.extend([
(dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'Padding5'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WINXP:
f.extend([
(aligned, 'align(AppCompatFlags)'),
(ULARGE_INTEGER, 'AppCompatFlags'),
(ULARGE_INTEGER, 'AppCompatFlagsUser'),
(PVOID, 'pShimData'),
(PVOID, 'AppCompatInfo'),
(umtypes.UNICODE_STRING, 'CSDVersion'),
(PVOID, 'ActivationContextData'), # FIXME: P(_ACTIVATION_CONTEXT_DATA)
(PVOID, 'ProcessAssemblyStorageMap'), # FIXME: P(_ASSEMBLY_STORAGE_MAP)
(PVOID, 'SystemDefaultActivationContextData'), # FIXME: P(_ACTIVATION_CONTEXT_DATA)
(PVOID, 'SystemAssemblyStorageMap'), # FIXME: P(_ASSEMBLY_STORAGE_MAP)
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'MinimumStackCommit'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WS03:
f.extend([
(PVOID, 'FlsCallback'), # FIXME: P(_FLS_CALLBACK_INFO)
(LIST_ENTRY, 'FlsListHead'),
(PVOID, 'FlsBitmap'),
(dyn.clone(BitmapBitsUlong, _object_=ULONG, length=4), 'FlsBitmapBits'),
(ULONG, 'FlsHighIndex'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
f.extend([
(aligned, 'align(WerRegistrationData)'),
(PVOID, 'WerRegistrationData'),
(PVOID, 'WerShipAssertPtr'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN7:
f.extend([
(PVOID, 'pContextData'),
(PVOID, 'pImageHeaderHash'),
(pbinary.littleendian(PEB.TracingFlags), 'TracingFlags')
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WINBLUE:
f.extend([
(dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'Padding6'),
(ULONGLONG, 'CsrServerReadOnlySharedMemoryBase')
])
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN8:
f.extend([
(ULONGLONG, 'CsrServerReadOnlySharedMemoryBase')
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN10_TH2:
f.extend([
(ULONG, 'TppWorkerpListLock'),
(LIST_ENTRY, 'TppWorkerpList'),
(dyn.array(PVOID, 128), 'WaitOnAddressHashTable'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN10_RS3:
f.extend([
(PVOID, 'TelemetryCoverageHeader'),
(ULONG, 'CloudFileFlags'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN10_RS4:
f.extend([
(ULONG, 'CloudFileDiagFlags'),
(CHAR, 'PlaceHolderCompatibilityMode'),
(dyn.block(7), 'PlaceHolderCompatibilityModeReserved'),
])
# FIXME: Some fields added for windows 10 RS5
# See https://www.geoffchappell.com/studies/windows/win32/ntdll/structs/peb/index.htm
return
def getmodulebyname(self, name):
ldr = self['Ldr'].d.l
for m in ldr.walk():
if m['BaseDllName'].str() == name:
return m
continue
raise KeyError(name)
def getmodulebyaddress(self, address):
ldr = self['Ldr'].d.l
for m in ldr.walk():
start, size = m['DllBase'].int(), m['SizeOfImage'].int()
left, right = start, start+size
if address >= left and address <= right:
return m
continue
raise KeyError(name)
def getmodulebyfullname(self, name):
ldr = self['Ldr'].d.l
name = name.lower().replace('\\', '/')
for m in ldr.walk():
if m['FullDllName'].str().lower().replace('\\', '/') == name:
return m
continue
raise KeyError(name)
class TEB_ACTIVE_FRAME_CONTEXT(pstruct.type):
_fields_ = [
(ULONG, 'Flags'),
(P(umtypes.PSTR), 'FrameName'),
]
class TEB_ACTIVE_FRAME(pstruct.type):
_fields_ = [
(ULONG, 'Flags'),
(lambda s: P(TEB_ACTIVE_FRAME), 'Previous'),
(P(TEB_ACTIVE_FRAME_CONTEXT), 'Context'),
]
class PTEB_ACTIVE_FRAME(P(TEB_ACTIVE_FRAME)): pass
class GDI_TEB_BATCH(pstruct.type):
_fields_ = [
(ULONG, 'Offset'),
(HANDLE, 'HDC'),
(dyn.array(ULONG, 0x136), 'Buffer'),
]
class TEB(pstruct.type, versioned):
'''
0x0F28 NT 3.51
0x0F88 NT 4.0
0x0FA4 Win2k
0x0FB4 prior to XP SP2
0x0FB8 XP SP2/WS03+
0x0FBC WS03 SP1+
0x0FF8 Vista/WS08
0x0FE4 Win7/WS08 R2
0x0FE8 Win8-Win8.1/WS12
0x1000 Win10
'''
@pbinary.littleendian
class _SameTebFlags(pbinary.flags):
_fields_ = [
(1, 'SafeThunkCall'),
(1, 'InDebugPrint'),
(1, 'HasFiberData'),
(1, 'SkipThreadAttach'),
(1, 'WerInShipAssertCode'),
(1, 'RanProcessInit'),
(1, 'ClonedThread'),
(1, 'SuppressDebugMsg'),
(1, 'DisableUserStackWalk'),
(1, 'RtlExceptionAttached'),
(1, 'InitialThread'),
(1, 'SessionAware'),
(1, 'LoadOwner'),
(1, 'LoaderWorker'),
(2, 'SpareSameTebBits'),
]
def __init__(self, **attrs):
super(TEB, self).__init__(**attrs)
self._fields_ = f = []
aligned = dyn.align(8 if getattr(self, 'WIN64', False) else 4)
f.extend([
(NT_TIB, 'Tib'),
(PVOID, 'EnvironmentPointer'),
(umtypes.CLIENT_ID, 'ClientId'),
(PVOID, 'ActiveRpcHandle'),
(PVOID, 'ThreadLocalStoragePointer'),
(P(PEB), 'ProcessEnvironmentBlock'),
(ULONG, 'LastErrorValue'),
(ULONG, 'CountOfOwnedCriticalSections'),
(PVOID, 'CsrClientThread'),
(P(Ntddk.W32THREAD), 'Win32ThreadInfo'),
(dyn.array(ULONG, 0x1a), 'User32Reserved'),
(dyn.array(ULONG, 5), 'UserReserved'),
(aligned, 'align(WOW32Reserved)'),
(PVOID, 'WOW32Reserved'),
(LCID, 'CurrentLocale'),
(ULONG, 'FpSoftwareStatusRegister'),
(dyn.array(PVOID, 0x36), 'SystemReserved1'),
(LONG, 'ExceptionCode'),
(aligned, 'align(ActivationContextStackPointer)'),
(P(Ntddk.ACTIVATION_CONTEXT_STACK), 'ActivationContextStackPointer'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_WS03:
f.append((dyn.block(28 if getattr(self, 'WIN64', False) else 24), 'SpareBytes1'))
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) == sdkddkver.NTDDI_WS03:
f.append((dyn.block(28 if getattr(self, 'WIN64', False) else 0x28), 'SpareBytes1'))
else:
f.append((dyn.block(24 if getattr(self, 'WIN64', False) else 0x24), 'SpareBytes1'))
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
f.append((ULONG, 'TxFsContext'))
f.extend([
(aligned, 'align(GdiTebBatch)'),
(GDI_TEB_BATCH, 'GdiTebBatch'),
(aligned, 'align(RealClientId)'),
(umtypes.CLIENT_ID, 'RealClientId'),
(PVOID, 'GdiCachedProcessHandle'),
(ULONG, 'GdiClientPID'),
(ULONG, 'GdiClientTID'),
(PVOID, 'GdiThreadLocalInfo'),
(dyn.array(PVOID, 62), 'Win32ClientInfo'),
(dyn.array(PVOID, 0xe9), 'glDispatchTable'),
(dyn.array(PVOID, 0x1d), 'glReserved1'),
(PVOID, 'glReserved2'),
(PVOID, 'glSectionInfo'),
(PVOID, 'glSection'),
(PVOID, 'glTable'),
(PVOID, 'glCurrentRC'),
(PVOID, 'glContext'),
(aligned, 'align(LastStatusValue)'),
(umtypes.NTSTATUS, 'LastStatusValue'),
(aligned, 'align(StaticUnicodeString)'),
(umtypes.UNICODE_STRING, 'StaticUnicodeString'),
(dyn.clone(pstr.wstring, length=0x106), 'StaticUnicodeBuffer'),
(aligned, 'align(DeallocationStack)'),
(PVOID, 'DeallocationStack'),
(dyn.array(PVOID, 0x40), 'TlsSlots'),
(LIST_ENTRY, 'TlsLinks'),
(PVOID, 'Vdm'),
(PVOID, 'ReservedForNtRpc'),
(dyn.array(PVOID, 0x2), 'DbgSsReserved'),
(ULONG, 'HardErrorMode'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_VISTA:
f.extend([
(aligned, 'align(Instrumentation)'),
(dyn.array(PVOID, 14 if getattr(self, 'WIN64', False) else 16), 'Instrumentation')
])
else:
f.extend([
(aligned, 'align(Instrumentation)'),
(dyn.array(PVOID, 11 if getattr(self, 'WIN64', False) else 9), 'Instrumentation')
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) <= sdkddkver.NTDDI_WS03:
f.extend([
(PVOID, 'SubProcessTag'),
(PVOID, 'EtwTraceData'),
])
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
f.extend([
(GUID, 'ActivityId'),
(PVOID, 'SubProcessTag'),
(PVOID, 'EtwLocalData'),
(PVOID, 'EtwTraceData'),
])
f.extend([
(PVOID, 'WinSockData'),
(ULONG, 'GdiBatchCount'),
])
f.extend([
(UCHAR, 'InDbgPrint'),
(UCHAR, 'FreeStackOnTermination'),
(UCHAR, 'HasFiberData'),
(UCHAR, 'IdealProcessor'),
])
f.extend([
(ULONG, 'GuaranteedStackBytes'),
(aligned, 'align(ReservedForPerf)'),
(PVOID, 'ReservedForPerf'),
(aligned, 'align(ReservedForOle)'),
(PVOID, 'ReservedForOle'),
(ULONG, 'WaitingOnLoaderLock'),
(dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'padding(WaitingOnLoaderLock)'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) <= sdkddkver.NTDDI_WS03:
f.extend([
(ULONGLONG, 'SparePointer1'),
(ULONGLONG, 'SoftPatchPtr1'),
(ULONGLONG, 'SoftPatchPtr2'),
])
else:
f.extend([
(aligned, 'align(SavedPriorityState)'),
(PVOID, 'SavedPriorityState'),
(ULONGLONG if getattr(self, 'WIN64', False) else ULONG, 'SoftPatchPtr1'),
(PVOID, 'ThreadPoolData'),
])
f.extend([
(PVOID, 'TlsExpansionSlots'),
])
if getattr(self, 'WIN64', False):
f.extend([
(PVOID, 'DeallocationBStore'),
(PVOID, 'BStoreLimit'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_WIN7:
f.append((ULONG, 'ImpersonationLocale'))
else:
f.append((ULONG, 'MuiGeneration'))
f.extend([
(ULONG, 'IsImpersonating'),
(PVOID, 'NlsCache'),
(PVOID, 'pShimData'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) <= sdkddkver.NTDDI_WIN7:
f.append((ULONG, 'HeapVirtualAffinity'))
else:
f.extend([
(USHORT, 'HeapVirtualAffinity'),
(USHORT, 'LowFragHeapDataSlot'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WINBLUE:
f.extend([
(dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'Padding7'),
])
f.extend([
(aligned, 'align(CurrentTransactionHandle)'),
(PVOID, 'CurrentTransactionHandle'),
(PTEB_ACTIVE_FRAME, 'ActiveFrame'),
(PVOID, 'FlsData'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) <= sdkddkver.NTDDI_WS03:
f.extend([
(UCHAR, 'SafeThunkCall'),
(dyn.array(UCHAR, 3), 'BooleanSpare'),
])
return
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_VISTA:
f.extend([
(PVOID, 'PreferredLangauges'),
(PVOID, 'UserPrefLanguages'),
(PVOID, 'MergedPrefLanguages'),
(ULONG, 'MuiImpersonation'),
(USHORT, 'CrossTebFlags'),
(TEB._SameTebFlags, 'SameTebFlags'),
(PVOID, 'TxnScopeEnterCallback'),
(PVOID, 'TxnScopeExitCallback'),
(PVOID, 'TxnScopeContext'),
(ULONG, 'LockCount'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) <= sdkddkver.NTDDI_VISTA:
f.extend([
(ULONG, 'ProcessRundown'),
(ULONGLONG, 'LastSwitchTime'),
(ULONGLONG, 'TotalSwitchOutTime'),
(LARGE_INTEGER, 'WaitReasonBitmap'),
])
return
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_WIN10:
f.extend([
(ULONG, 'SpareUlong0'),
(PVOID, 'ResourceRetValue'),
])
else:
f.extend([
(ULONG, 'WowTebOffset'),
(PVOID, 'ResourceRetValue'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN8:
f.extend([
(PVOID, 'ReservedForWdf'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN10:
f.extend([
(ULONGLONG, 'ReservedForCrt'),
(GUID, 'EffectiveContainerId'),
])
return
class THREAD_INFORMATION_CLASS(pint.enum):
_values_ = [(name, value) for value, name in [
(0, 'ThreadBasicInformation'),
(1, 'ThreadTimes'),
(2, 'ThreadPriority'),
(3, 'ThreadBasePriority'),
(4, 'ThreadAffinityMask'),
(5, 'ThreadImpersonationToken'),
(6, 'ThreadDescriptorTableEntry'),
(7, 'ThreadEnableAlignmentFaultFixup'),
(8, 'ThreadEventPair'),
(9, 'ThreadQuerySetWin32StartAddress'),
(10, 'ThreadZeroTlsCell'),
(11, 'ThreadPerformanceCount'),
(12, 'ThreadAmILastThread'),
(13, 'ThreadIdealProcessor'),
(14, 'ThreadPriorityBoost'),
(15, 'ThreadSetTlsArrayAddress'),
(16, 'ThreadIsIoPending'),
(17, 'ThreadHideFromDebugger'),
(18, 'ThreadBreakOnTermination'),
(19, 'ThreadSwitchLegacyState'),
(20, 'ThreadIsTerminated'),
(21, 'ThreadLastSystemCall'),
(22, 'ThreadIoPriority'),
(23, 'ThreadCycleTime'),
(24, 'ThreadPagePriority'),
(25, 'ThreadActualBasePriority'),
(26, 'ThreadTebInformation'),
(27, 'ThreadCSwitchMon'),
(28, 'ThreadCSwitchPmu'),
(29, 'ThreadWow64Context'),
(30, 'ThreadGroupInformation'),
(31, 'ThreadUmsInformation'),
(32, 'ThreadCounterProfiling'),
(33, 'ThreadIdealProcessorEx'),
(34, 'ThreadCpuAccountingInformation'),
(35, 'ThreadSuspendCount'),
(36, 'ThreadHeterogeneousCpuPolicy'),
(37, 'ThreadContainerId'),
(38, 'ThreadNameInformation'),
(39, 'ThreadProperty'),
]]
class THREAD_BASIC_INFORMATION(pstruct.type, versioned):
type = THREAD_INFORMATION_CLASS.byname('ThreadBasicInformation')
def __init__(self, **attrs):
super(THREAD_BASIC_INFORMATION, self).__init__(**attrs)
self._fields_ = [
(umtypes.NTSTATUS, 'ExitStatus'),
(PVOID, 'TebBaseAddress'),
(umtypes.CLIENT_ID, 'ClientId'),
(umtypes.KAFFINITY, 'AffinityMask'),
(umtypes.KPRIORITY, 'Priority'),
(umtypes.KPRIORITY, 'BasePriority'),
]
class THREAD_PROPERTY_INFORMATION(pstruct.type):
type = THREAD_INFORMATION_CLASS.byname('ThreadProperty')
_fields_ = [
(ULONGLONG, 'Key'),
(PVOID, 'Object'),
(PVOID, 'Thread'),
(ULONG, 'Flags'),
]
class PROCESS_INFORMATION_CLASS(pint.enum):
_values_ = [(name, value) for value, name in [
(0, 'ProcessBasicInformation'),
(1, 'ProcessQuotaLimits'),
(2, 'ProcessIoCounters'),
(3, 'ProcessVmCounters'),
(4, 'ProcessTimes'),
(5, 'ProcessBasePriority'),
(6, 'ProcessRaisePriority'),
(7, 'ProcessDebugPort'),
(8, 'ProcessExceptionPort'),
(9, 'ProcessAccessToken'),
(10, 'ProcessLdtInformation'),
(11, 'ProcessLdtSize'),
(12, 'ProcessDefaultHardErrorMode'),
(13, 'ProcessIoPortHandlers'),
(14, 'ProcessPooledUsageAndLimits'),
(15, 'ProcessWorkingSetWatch'),
(16, 'ProcessUserModeIOPL'),
(17, 'ProcessEnableAlignmentFaultFixup'),
(18, 'ProcessPriorityClass'),
(19, 'ProcessWx86Information'),
(20, 'ProcessHandleCount'),
(21, 'ProcessAffinityMask'),
(22, 'ProcessPriorityBoost'),
(23, 'ProcessDeviceMap'),
(24, 'ProcessSessionInformation'),
(25, 'ProcessForegroundInformation'),
(26, 'ProcessWow64Information'),
(27, 'ProcessImageFileName'),
(28, 'ProcessLUIDDeviceMapsEnabled'),
(29, 'ProcessBreakOnTermination'),
(30, 'ProcessDebugObjectHandle'),
(31, 'ProcessDebugFlags'),
(32, 'ProcessHandleTracing'),
(33, 'ProcessIoPriority'),
(34, 'ProcessExecuteFlags'),
(35, 'ProcessResourceManagement'),
(36, 'ProcessCookie'),
(37, 'ProcessImageInformation'),
(38, 'ProcessCycleTime'),
(39, 'ProcessPagePriority'),
(40, 'ProcessInstrumentationCallback'),
(41, 'ProcessThreadStackAllocation'),
(42, 'ProcessWorkingSetWatchEx'),
(43, 'ProcessImageFileNameWin32'),
(44, 'ProcessImageFileMapping'),
(45, 'ProcessAffinityUpdateMode'),
(46, 'ProcessMemoryAllocationMode'),
(47, 'ProcessGroupInformation'),
(48, 'ProcessTokenVirtualizationEnabled'),
(49, 'ProcessConsoleHostProcess'),
(50, 'ProcessWindowInformation'),
(51, 'ProcessHandleInformation'),
(52, 'ProcessMitigationPolicy'),
(53, 'ProcessDynamicFunctionTableInformation'),
(54, 'ProcessHandleCheckingMode'),
(55, 'ProcessKeepAliveCount'),
(56, 'ProcessRevokeFileHandles'),
(57, 'ProcessWorkingSetControl'),
(58, 'ProcessHandleTable'),
(59, 'ProcessCheckStackExtentsMode'),
(60, 'ProcessCommandLineInformation'),
(61, 'ProcessProtectionInformation'),
(62, 'ProcessMemoryExhaustion'),
(63, 'ProcessFaultInformation'),
(64, 'ProcessTelemetryIdInformation'),
(65, 'ProcessCommitReleaseInformation'),
(66, 'ProcessDefaultCpuSetsInformation'),
(67, 'ProcessAllowedCpuSetsInformation'),
(68, 'ProcessSubsystemProcess'),
(69, 'ProcessJobMemoryInformation'),
(70, 'ProcessInPrivate'),
(71, 'ProcessRaiseUMExceptionOnInvalidHandleClose'),
(72, 'ProcessIumChallengeResponse'),
(73, 'ProcessChildProcessInformation'),
(74, 'ProcessHighGraphicsPriorityInformation'),
(75, 'ProcessSubsystemInformation'),
(76, 'ProcessEnergyValues'),
(77, 'ProcessActivityThrottleState'),
(78, 'ProcessActivityThrottlePolicy'),
(79, 'ProcessWin32kSyscallFilterInformation'),
(80, 'ProcessDisableSystemAllowedCpuSets'),
(81, 'ProcessWakeInformation'),
(82, 'ProcessEnergyTrackingState'),
(83, 'ProcessManageWritesToExecutableMemory'),
(84, 'ProcessCaptureTrustletLiveDump'),
(85, 'ProcessTelemetryCoverage'),
(86, 'ProcessEnclaveInformation'),
(87, 'ProcessEnableReadWriteVmLogging'),
(88, 'ProcessUptimeInformation'),
(89, 'ProcessImageSection'),
(90, 'ProcessDebugAuthInformation'),
(91, 'ProcessSystemResourceManagement'),
(92, 'ProcessSequenceNumber'),
(93, 'ProcessLoaderDetour'),
(94, 'ProcessSecurityDomainInformation'),
(95, 'ProcessCombineSecurityDomainsInformation'),
(96, 'ProcessEnableLogging'),
(97, 'ProcessLeapSecondInformation'),
(98, 'ProcessFiberShadowStackAllocation'),
(99, 'ProcessFreeFiberShadowStackAllocation'),
]]
class PROCESS_BASIC_INFORMATION(pstruct.type, versioned):
# XXX: there's 2 versions of this structure on server 2016
# 32-bit -> 24, 32
# 64-bit -> 48, 64
_fields_ = [
(umtypes.NTSTATUS, 'ExitStatus'),
(lambda self: dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'padding(ExitStatus)'),
(P(PEB), 'PebBaseAddress'),
(ULONG_PTR, 'AffinityMask'),
(umtypes.KPRIORITY, 'BasePriority'),
(lambda self: dyn.block(4 if getattr(self, 'WIN64', False) else 0), 'padding(BasePriority)'),
(HANDLE, 'UniqueProcessId'),
(HANDLE, 'InheritedFromUniqueProcessId'),
]
class PROCESS_MEMORY_EXHAUSTION_TYPE(pint.enum, ULONG):
_values_ = [(n, v) for v, n in [
(0, 'PMETypeFaultFastOnCommitFailure'),
]]
class PROCESS_MEMORY_EXHAUSTION_INFO(pstruct.type):
type = PROCESS_INFORMATION_CLASS.byname('ProcessMemoryExhaustion')
_fields_ = [
(USHORT, 'Version'),
(USHORT, 'Reserved'),
(PROCESS_MEMORY_EXHAUSTION_TYPE, 'Value'),
(ULONGLONG, 'Value'),
]
class PROCESS_FAULT_INFORMATION(pstruct.type):
type = PROCESS_INFORMATION_CLASS.byname('ProcessFaultInformation')
_fields_ = [
(ULONG, 'FaultFlags'),
(ULONG, 'AdditionalInfo'),
]
class PROCESS_TELEMETRY_ID_INFORMATION(pstruct.type):
type = PROCESS_INFORMATION_CLASS.byname('ProcessTelemetryIdInformation')
_fields_ = [
(ULONG, 'HeaderSize'),
(ULONG, 'ProcessId'),
(ULONGLONG, 'ProcessStartKey'),
(ULONGLONG, 'CreateTime'),
(ULONGLONG, 'CreateInterruptTime'),
(ULONGLONG, 'ProcessSequenceNumber'),
(ULONGLONG, 'SessionCreateTime'),
(ULONG, 'SessionId'),
(ULONG, 'BootId'),
(ULONG, 'ImageChecksum'),
(ULONG, 'ImageTimeDateStamp'),
(ULONG, 'UserIdOffset'),
(ULONG, 'ImagePathOffset'),
(ULONG, 'PackageNameOffset'),
(ULONG, 'RelativeAppNameOffset'),
(ULONG, 'CommandLineOffset'),
]
@pbinary.littleendian
class API_SET_SCHEMA_FLAGS_(pbinary.flags):
_fields_ = [
(30, 'unused'),
(1, 'HOST_EXTENSION'),
(1, 'SEALED'),
]
class API_SET_HEADER(pstruct.type):
def __init__(self, **attrs):
super(API_SET_HEADER, self).__init__(**attrs)
self._fields_ = f = []
# https://www.geoffchappell.com/studies/windows/win32/apisetschema/index.htm
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) < sdkddkver.NTDDI_WIN8:
f.extend([
(ULONG, 'Version'),
(ULONG, 'Count'),
])
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) >= sdkddkver.NTDDI_WIN8:
f.extend([
(ULONG, 'Version'),
(ULONG, 'Size'),
(API_SET_SCHEMA_FLAGS_, 'Flags'),
(ULONG, 'Count'),
])
else:
raise error.NdkUnsupportedVersion(self)
return
def summary(self):
res = []
for fld in self:
res.append("{:s}={:s}".format(fld, "{:#x}".format(self[fld].int()) if isinstance(self[fld], pint.type) else self[fld].summary()))
return ' '.join(res)
class API_SET_VALUE_ENTRY(pstruct.type):
class _Value(rpointer_t):
_value_ = ULONG
def summary(self):
res = super(API_SET_VALUE_ENTRY._Value, self).summary()
return '{:s} -> {!r}'.format(res, self.d.l.str())
class _object_(pstr.wstring):
def blocksize(self):
try:
parent = self.getparent(API_SET_VALUE_ENTRY)
result = parent['Size'].li.int()
except (ptypes.error.ItemNotFoundError, ptypes.error.InitializationError):
result = 0
return result
def __Value(self):
def _object_(self, parent=self):
parent = self.getparent(API_SET_VALUE_ENTRY)
res = parent['Size'].li.int()
return dyn.clone(pstr.wstring, blocksize=lambda s, sz=res: sz)
return dyn.clone(API_SET_VALUE_ENTRY._Value, _baseobject_=self._baseobject_, _object_=_object_)
_fields_ = [
(lambda s: dyn.clone(s._Value, _baseobject_=s._baseobject_), 'Value'),
(ULONG, 'Size'),
]
class API_SET_VALUE(pstruct.type):
_fields_ = [
(ULONG, 'Count'),
(ULONG, 'EndOfEntriesOffset'),
(ULONG, 'Hash'),
(lambda s: API_SET_VALUE_ENTRY if s['Count'].li.int() > 1 else ptype.undefined, 'OriginalRedirect'),
(lambda s: dyn.array(API_SET_VALUE_ENTRY, s['Count'].li.int()), 'Entry'),
]
class API_SET_ENTRY(pstruct.type):
_baseobject_ = None
class _NameOffset(rpointer_t):
_value_ = ULONG
def summary(self):
res = super(API_SET_ENTRY._NameOffset, self).summary()
return '{:s} -> {!r}'.format(res, self.d.li.str())
class _object_(pstr.wstring):
def blocksize(self):
try:
parent = self.getparent(API_SET_ENTRY)
result = parent['NameLength'].li.int()
except (ptypes.error.ItemNotFoundError, ptypes.error.InitializationError):
result = 0
return result
class _ValueOffset(rpointer_t):
_value_ = ULONG
_object_ = API_SET_VALUE
_fields_ = [
(lambda s: dyn.clone(s._NameOffset, _baseobject_=s._baseobject_), 'NameOffset'),
(ULONG, 'NameLength'),
(lambda s: dyn.clone(s._ValueOffset, _baseobject_=s._baseobject_), 'ValueOffset'),
]
class API_SET_MAP(pstruct.type, versioned):
def __Entry(self):
res = self['Header'].li
return dyn.array(API_SET_ENTRY, res['Count'].int(), recurse={'_baseobject_':self})
_fields_ = [
(API_SET_HEADER, 'Header'),
(__Entry, 'Entry'),
]
class KSYSTEM_TIME(pstruct.type):
_fields_ = [
(ULONG, 'LowPart'),
(LONG, 'High1Time'),
(LONG, 'High2Time'),
]
def summary(self):
return "LowPart={:#x} High1Time={:#x} High2Time={:#x}".format(self['LowPart'].int(), self['High1Time'].int(), self['High2Time'].int())
class WOW64_SHARED_INFORMATION(pint.enum):
_values_ = [
('SharedNtdll32LdrInitializeThunk', 0),
('SharedNtdll32KiUserExceptionDispatcher', 1),
('SharedNtdll32KiUserApcDispatcher', 2),
('SharedNtdll32KiUserCallbackDispatcher', 3),
('SharedNtdll32LdrHotPatchRoutine', 4),
('SharedNtdll32ExpInterlockedPopEntrySListFault', 5),
('SharedNtdll32ExpInterlockedPopEntrySListResume', 6),
('SharedNtdll32ExpInterlockedPopEntrySListEnd', 7),
('SharedNtdll32RtlUserThreadStart', 8),
('SharedNtdll32pQueryProcessDebugInformationRemote', 9),
('SharedNtdll32EtwpNotificationThread', 10),
('SharedNtdll32BaseAddress', 11),
('Wow64SharedPageEntriesCount', 12),
]
class NT_PRODUCT_TYPE(pint.enum, ULONG):
_values_ = [
('NtProductWinNt', 1),
('NtProductLanManNt', 2),
('NtProductServer', 3),
]
class ALTERNATIVE_ARCHITECTURE_TYPE(pint.enum, ULONG):
_values_ = [
('StandardDesign', 0),
('NEC98x86', 1),
('EndAlternatives', 2),
]
class XSTATE_CONFIGURATION(pstruct.type):
class FEATURE(pstruct.type):
_fields_ = [(ULONG, 'Offset'), (ULONG, 'Size')]
_fields_ = [
(ULONGLONG, 'EnabledFeatures'),
(ULONG, 'Size'),
(ULONG, 'OptimizedSave'),
(dyn.array(FEATURE, 64), 'Features'),
]
class SHARED_GLOBAL_FLAGS_(pbinary.flags):
_fields_ = [
(21, 'SpareBits'),
(1, 'STATE_SEPARATION_ENABLED'), # 0x00000400
(1, 'MULTIUSERS_IN_SESSION_SKU'), # 0x00000200
(1, 'MULTI_SESSION_SKU'), # 0x00000100
(1, 'SECURE_BOOT_ENABLED'), # 0x00000080
(1, 'CONSOLE_BROKER_ENABLED'), # 0x00000040
# (1, 'SEH_VALIDATION_ENABLED'), # 0x00000040 (W7)
(1, 'DYNAMIC_PROC_ENABLED'), # 0x00000020
(1, 'LKG_ENABLED'), # 0x00000010
# (1, 'SPARE'), # 0x00000010 (W7)
(1, 'INSTALLER_DETECT_ENABLED'), # 0x00000008
(1, 'VIRT_ENABLED'), # 0x00000004
(1, 'ELEVATION_ENABLED'), # 0x00000002
(1, 'ERROR_PORT'), # 0x00000001
]
PROCESSOR_MAX_FEATURES = 64
class PF_(parray.type):
_object_, length = BOOLEAN, PROCESSOR_MAX_FEATURES
_aliases_ = [
('FLOATING_POINT_PRECISION_ERRATA', 0), # 4.0 and higher (x86)
('FLOATING_POINT_EMULATED', 1), # 4.0 and higher (x86)
('COMPARE_EXCHANGE_DOUBLE', 2), # 4.0 and higher
('MMX_INSTRUCTIONS_AVAILABLE', 3), # 4.0 and higher
('PPC_MOVEMEM_64BIT_OK', 4), # none
('ALPHA_BYTE_INSTRUCTIONS', 5), # none
('XMMI_INSTRUCTIONS_AVAILABLE', 6), # 5.0 and higher
('3DNOW_INSTRUCTIONS_AVAILABLE', 7), # 5.0 and higher
('RDTSC_INSTRUCTION_AVAILABLE', 8), # 5.0 and higher
('PAE_ENABLED', 9), # 5.0 and higher
('XMMI64_INSTRUCTIONS_AVAILABLE', 10), # 5.1 and higher
('SSE_DAZ_MODE_AVAILABLE', 11), # none
('NX_ENABLED', 12), # late 5.1; late 5.2 and higher
('SSE3_INSTRUCTIONS_AVAILABLE', 13), # 6.0 and higher
('COMPARE_EXCHANGE128', 14), # 6.0 and higher (x64)
('COMPARE64_EXCHANGE128', 15), # none
('CHANNELS_ENABLED', 16), # 6.0 only
('XSAVE_ENABLED', 17), # 6.1 and higher
('ARM_VFP_32_REGISTERS_AVAILABLE', 18), # none
('ARM_NEON_INSTRUCTIONS_AVAILABLE', 19), # none
('SECOND_LEVEL_ADDRESS_TRANSLATION', 20), # 6.2 and higher
('VIRT_FIRMWARE_ENABLED', 21), # 6.2 and higher
('RDWRFSGSBASE_AVAILABLE', 22), # 6.2 and higher (x64)
('FASTFAIL_AVAILABLE', 23), # 6.2 and higher
('ARM_DIVIDE_INSTRUCTION_AVAILABLE', 24), # none
('ARM_64BIT_LOADSTORE_ATOMIC', 25), # none
('ARM_EXTERNAL_CACHE_AVAILABLE', 26), # none
('ARM_FMAC_INSTRUCTIONS_AVAILABLE', 27), # none
('RDRAND_INSTRUCTION_AVAILABLE', 28), # 6.3 and higher
('ARM_V8_INSTRUCTIONS_AVAILABLE', 29), # none
('ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE', 30), # none
('ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE', 31), # none
('RDTSCP_INSTRUCTION_AVAILABLE', 32), # 10.0 and higher
]
class KUSER_SHARED_DATA(pstruct.type, versioned):
# FIXME: https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
class TscQpc(pbinary.struct):
_fields_ = [
(16, 'Pad'),
(6, 'Shift'),
(1, 'SpareFlag'),
(1, 'Enabled'),
]
class SharedDataFlags(pbinary.flags):
_fields_ = [
(25, 'SpareBits'),
(0, 'DbgStateSeparationEnabled'), # 1709
(0, 'DbgMultiUsersInSessionSKU'), # 1607
(0, 'DbgMultiSessionSKU'), # 10.0
(0, 'DbgSecureBootEnabled'), # 6.2
(1, 'DbgSEHValidationEnabled'), # 6.1
(0, 'DbgConsoleBrokerEnabled'), # 6.2
(1, 'DbgDynProcessorEnabled'), # 6.1
(1, 'DbgSystemDllRelocated'), # 6.0
(0, 'DbgLkgEnabled'), # 6.2
(1, 'DbgInstallerDetectEnabled'), # 6.0
(1, 'DbgVirtEnabled'), # 6.0
(1, 'DbgElevationEnabled'), # 6.0
(1, 'DbgErrorPortPresent'), # 6.0
]
def __init__(self, **attrs):
super(KUSER_SHARED_DATA, self).__init__(**attrs)
self._fields_ = f = []
PROCESSOR_MAX_FEATURES = 64
f.extend([
(ULONG, 'TickCountLowDeprecated'),
(ULONG, 'TickCountMultiplier'),
(KSYSTEM_TIME, 'InterruptTime'),
(KSYSTEM_TIME, 'SystemTime'),
(KSYSTEM_TIME, 'TimeZoneBias'),
(USHORT, 'ImageNumberLow'),
(USHORT, 'ImageNumberHigh'),
(dyn.clone(pstr.wstring, length=260), 'NtSystemRoot'),
(ULONG, 'MaxStackTraceDepth'),
(ULONG, 'CryptoExponent'),
(ULONG, 'TimeZoneId'),
(ULONG, 'LargePageMinimum'),
(dyn.array(ULONG, 7), 'Reserved2'),
(NT_PRODUCT_TYPE, 'NtProductType'),
(BOOLEAN, 'ProductTypeIsValid'),
(dyn.align(4), 'ProductTypeIsValidAlignment'),
(ULONG, 'NtMajorVersion'),
(ULONG, 'NtMinorVersion'),
(dyn.array(BOOLEAN, PROCESSOR_MAX_FEATURES), 'ProcessorFeatures'), # PF_
(ULONG, 'Reserved1'),
(ULONG, 'Reserved3'),
(ULONG, 'TimeSlip'),
(ALTERNATIVE_ARCHITECTURE_TYPE, 'AlternativeArchitecture'),
(ULONG, 'AltArchitecturePad'),
(LARGE_INTEGER, 'SystemExpirationDate'),
(ULONG, 'SuiteMask'),
(BOOLEAN, 'KdDebuggerEnabled'),
(UCHAR, 'NXSupportPolicy'),
(dyn.align(4), 'ActiveConsoleAlignment'),
(ULONG, 'ActiveConsoleId'),
(ULONG, 'DismountCount'),
(ULONG, 'ComPlusPackage'),
(ULONG, 'LastSystemRITEventTickCount'),
(ULONG, 'NumberOfPhysicalPages'),
(BOOLEAN, 'SafeBootMode'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) == sdkddkver.NTDDI_WIN7:
f.append((self.TscQpc, 'TscQpc'))
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) > sdkddkver.NTDDI_WIN7:
f.append((dyn.array(pint.uint8_t, 4), 'Reserved12'))
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) == sdkddkver.NTDDI_WINXP:
f.append((ULONG, 'TraceLogging'))
elif sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) == sdkddkver.NTDDI_WIN7:
f.extend([
(self.SharedDataFlags, 'SharedDataFlags'),
(dyn.array(ULONG, 1), 'DataFlagsPad'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) in {sdkddkver.NTDDI_WINXP, sdkddkver.NTDDI_WIN7}:
f.extend([
(ULONGLONG, 'TestRetInstruction'),
(ULONG, 'SystemCall'),
(ULONG, 'SystemCallReturn'),
(dyn.array(ULONGLONG, 3), 'SystemCallPad'),
])
f.extend([
(KSYSTEM_TIME, 'TickCount'),
(dyn.array(LONG, 1), 'TickCountPad'),
(ULONG, 'Cookie'),
])
if sdkddkver.NTDDI_MAJOR(self.NTDDI_VERSION) == sdkddkver.NTDDI_WIN7:
f.extend([
(dyn.array(ULONG, 1), 'CookiePad'), # pad to what, a ULONGLONG?
(LONGLONG, 'ConsoleSessionForegroundProcessId'),
(dyn.array(ULONG, 16), 'Wow64SharedInformation'),
(dyn.array(USHORT, 16), 'UserModeGlobalLogger'),
(ULONG, 'ImageFileExecutionOptions'),
(ULONG, 'LangGenerationCount'),
(ULONGLONG, 'Reserved5'),
(ULONGLONG, 'InterruptTimeBias'),
(ULONGLONG, 'TscQpcBias'),
(ULONG, 'ActiveProcessorCount'),
(USHORT, 'ActiveGroupCount'),
(USHORT, 'Reserved4'),
(ULONG, 'AitSamplingValue'),
(ULONG, 'AppCompatFlag'),
(ULONGLONG, 'SystemDllNativeRelocation'),
(ULONGLONG, 'SystemDllWowRelocation'),
# (dyn.array(LONG, 1), 'XStatePad'),
# (dyn.align(0x10), 'XStatePad'), # ???
(XSTATE_CONFIGURATION, 'XState'),
])
return
class ETHREAD(pstruct.type, versioned):
_fields_ = [
(ketypes.KTHREAD, 'Tcb'),
(LARGE_INTEGER, 'CreateTime'),
(LIST_ENTRY, 'KeyedWaitChain'), # XXX: union
(LONG, 'ExitStatus'), # XXX: union
(LIST_ENTRY, 'PostBlockList'), # XXX: union
(PVOID, 'KeyedWaitValue'), # XXX: union
(ULONG, 'ActiveTimerListLock'),
(LIST_ENTRY, 'ActiveTimerListHead'),
(umtypes.CLIENT_ID, 'Cid'),
(ketypes.KSEMAPHORE, 'KeyedWaitSemaphore'), # XXX: union
# (PS_CLIENT_SECURITY_CONTEXT, 'ClientSecurity'),
(dyn.block(4), 'ClientSecurity'),
(LIST_ENTRY, 'IrpList'),
(ULONG, 'TopLevelIrp'),
# (PDEVICE_OBJECT, 'DeviceToVerify'),
(P(dyn.block(0xb8)), 'DeviceToVerify'),
# (_PSP_RATE_APC *, 'RateControlApc'),
(dyn.block(4), 'RateControlApc'),
(PVOID, 'Win32StartAddress'),
(PVOID, 'SparePtr0'),
(LIST_ENTRY, 'ThreadListEntry'),
# (EX_RUNDOWN_REF, 'RundownProtect'),
# (EX_PUSH_LOCK, 'ThreadLock'),
(dyn.block(4), 'RundownProtect'),
(dyn.block(4), 'ThreadLock'),
(ULONG, 'ReadClusterSize'),
(LONG, 'MmLockOrdering'),
(ULONG, 'CrossThreadFlags'), # XXX: union
(ULONG, 'SameThreadPassiveFlags'), # XXX: union
(ULONG, 'SameThreadApcFlags'), # XXX
(UCHAR, 'CacheManagerActive'),
(UCHAR, 'DisablePageFaultClustering'),
(UCHAR, 'ActiveFaultCount'),
(ULONG, 'AlpcMessageId'),
(PVOID, 'AlpcMessage'), # XXX: union
(LIST_ENTRY, 'AlpcWaitListEntry'),
(ULONG, 'CacheManagerCount'),
]
class PROCESS_BASIC_INFORMATION(pstruct.type):
_fields_ = [
(NTSTATUS, 'ExitStatus'),
(P(PEB), 'PebBaseAddress'),
(ULONG_PTR, 'AffinityMask'),
(umtypes.KPRIORITY, 'BasePriority'),
(ULONG_PTR, 'UniqueProcessId'),
(ULONG_PTR, 'InheritedFromUniqueProcessId'),
]
class PROCESS_EXTENDED_BASIC_INFORMATION(pstruct.type):
@pbinary.littleendian
class _Flags(pbinary.flags):
'ULONG'
_fields_ = [
(28, 'SpareBits'),
(1, 'IsCrossSectionCreate'),
(1, 'IsProcessDeleting'),
(1, 'IsWow64Process'),
(1, 'IsProtectedProcess'),
]
_fields_ = [
(SIZE_T, 'Size'),
(PROCESS_BASIC_INFORMATION, 'BasicInfo'),
(_Flags, 'Flags'),
(ptype.undefined, 'undefined'),
]
def alloc(self, **fields):
res = super(PROCESS_EXTENDED_BASIC_INFORMATION, self).alloc(**fields)
return res if 'Size' in fields else res.set(Size=res.size())
class COPYDATASTRUCT(pstruct.type):
_fields_ = [
(ULONG_PTR, 'dwData'),
(DWORD, 'cbData'),
(lambda self: P(dyn.block(self['cbData'].li.int())), 'lpData'),
]
def alloc(self, **fields):
res = super(COPYDATASTRUCT, self).alloc(**fields)
if res['lpData'].d.initializedQ():
return res if 'cbData' in fields else res.set(cbData=res['lpData'].d.size())
return res
class STARTUPINFO(pstruct.type):
_fields_ = [
(DWORD, 'cb'),
(lambda self: getattr(self, '__string__', umtypes.PSTR), 'lpReserved'),
(lambda self: getattr(self, '__string__', umtypes.PSTR), 'lpDesktop'),
(lambda self: getattr(self, '__string__', umtypes.PSTR), 'lpTitle'),
(DWORD, 'dwX'),
(DWORD, 'dwY'),
(DWORD, 'dwXSize'),
(DWORD, 'dwYSize'),
(DWORD, 'dwXCountChars'),
(DWORD, 'dwYCountChars'),
(DWORD, 'dwFillAttribute'),
(DWORD, 'dwFlags'),
(WORD, 'wShowWindow'),
(WORD, 'cbReserved2'),
(lambda self: P(dyn.block(self['cbReserved2'].li.int())), 'lpReserved2'),
(HANDLE, 'hStdInput'),
(HANDLE, 'hStdOutput'),
(HANDLE, 'hStdError'),
(ptype.undefined, 'undefined'),
]
def alloc(self, **fields):
res = super(STARTUPINFO, self).alloc(**fields)
return res if 'cb' in fields else res.set(cb=res.size())
class STARTUPINFOA(STARTUPINFO):
pass
class STARTUPINFOW(STARTUPINFO):
__string__ = umtypes.PWSTR
if __name__ == '__main__':
import ctypes
def openprocess (pid):
k32 = ctypes.WinDLL('kernel32.dll')
res = k32.OpenProcess(0x30 | 0x0400, False, pid)
return res
def getcurrentprocess ():
k32 = ctypes.WinDLL('kernel32.dll')
return k32.GetCurrentProcess()
def getPBIObj (handle):
nt = ctypes.WinDLL('ntdll.dll')
class ProcessBasicInformation(ctypes.Structure):
_fields_ = [('Reserved1', ctypes.c_uint32),
('PebBaseAddress', ctypes.c_uint32),
('Reserved2', ctypes.c_uint32 * 2),
('UniqueProcessId', ctypes.c_uint32),
('Reserved3', ctypes.c_uint32)]
pbi = ProcessBasicInformation()
res = nt.NtQueryInformationProcess(handle, 0, ctypes.byref(pbi), ctypes.sizeof(pbi), None)
return pbi
handle = getcurrentprocess()
pebaddress = getPBIObj(handle).PebBaseAddress
import ptypes, pstypes
Peb = pstypes.PEB()
Peb.setoffset(pebaddress)
Peb.load()
Ldr = Peb['Ldr'].d.l
for x in Ldr['InLoadOrderModuleList'].walk():
print(x['BaseDllName'].str(), x['FullDllName'].str())
print(hex(x['DllBase'].int()), hex(x['SizeOfImage'].int()))
| arizvisa/syringe | lib/ndk/pstypes.py | Python | bsd-2-clause | 57,087 |
import ConfigParser
from .settings import SECTIONS, CONFIG
config = ConfigParser.ConfigParser()
config.read(CONFIG)
if not config.has_section(SECTIONS['INCREMENTS']):
config.add_section(SECTIONS['INCREMENTS'])
with open(CONFIG, 'w') as f:
config.write(f)
def read_since_ids(users):
"""
Read max ids of the last downloads
:param users: A list of users
Return a dictionary mapping users to ids
"""
since_ids = {}
for user in users:
if config.has_option(SECTIONS['INCREMENTS'], user):
since_ids[user] = config.getint(SECTIONS['INCREMENTS'], user) + 1
return since_ids
def set_max_ids(max_ids):
"""
Set max ids of the current downloads
:param max_ids: A dictionary mapping users to ids
"""
config.read(CONFIG)
for user, max_id in max_ids.items():
config.set(SECTIONS['INCREMENTS'], user, str(max_id))
with open(CONFIG, 'w') as f:
config.write(f)
def remove_since_id(user):
if config.has_option(SECTIONS['INCREMENTS'], user):
config.remove_option(SECTIONS['INCREMENTS'], user)
with open(CONFIG, 'w') as f:
config.write(f)
| wenli810620/twitter-photos | twphotos/increment.py | Python | bsd-2-clause | 1,165 |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Antmicro <www.antmicro.com>
# Copyright (c) 2019 David Shah <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import zcu104
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litex.soc.cores.bitbang import I2CMaster
from litedram.modules import MTA4ATF51264HZ
from litedram.phy import usddrphy
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_pll4x = ClockDomain(reset_less=True)
self.clock_domains.cd_idelay = ClockDomain()
# # #
self.submodules.pll = pll = USMMCM(speedgrade=-2)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(platform.request("clk125"), 125e6)
pll.create_clkout(self.cd_pll4x, sys_clk_freq*4, buf=None, with_reset=False)
pll.create_clkout(self.cd_idelay, 500e6)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
self.specials += [
Instance("BUFGCE_DIV", name="main_bufgce_div",
p_BUFGCE_DIVIDE=4,
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys.clk),
Instance("BUFGCE", name="main_bufgce",
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys4x.clk),
]
self.submodules.idelayctrl = USIDELAYCTRL(cd_ref=self.cd_idelay, cd_sys=self.cd_sys)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(125e6), with_led_chaser=True, **kwargs):
platform = zcu104.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on ZCU104",
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR4 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = usddrphy.USPDDRPHY(platform.request("ddram"),
memtype = "DDR4",
sys_clk_freq = sys_clk_freq,
iodelay_clk_freq = 500e6)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MTA4ATF51264HZ(sys_clk_freq, "1:4"),
size = 0x40000000,
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on ZCU104")
parser.add_argument("--build", action="store_true", help="Build bitstream.")
parser.add_argument("--load", action="store_true", help="Load bitstream.")
parser.add_argument("--sys-clk-freq", default=125e6, help="System clock frequency.")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
| litex-hub/litex-boards | litex_boards/targets/xilinx_zcu104.py | Python | bsd-2-clause | 4,423 |
from JumpScale import j
from GitFactory import GitFactory
j.base.loader.makeAvailable(j, 'clients')
j.clients.git = GitFactory()
| Jumpscale/jumpscale6_core | lib/JumpScale/baselib/git/__init__.py | Python | bsd-2-clause | 131 |
def fat(n):
result = 1
while n > 0:
result = result * n
n = n - 1
return result
# testes
print("Fatorial de 3: ", fat(3));
| Gigers/data-struct | algoritimos/Python/fatorial-while.py | Python | bsd-2-clause | 158 |
import warnings
class DeprecatedCallableStr(str):
do_no_call_in_templates = True
def __new__(cls, value, *args, **kwargs):
return super(DeprecatedCallableStr, cls).__new__(cls, value)
def __init__(self, value, warning, warning_cls):
self.warning, self.warning_cls = warning, warning_cls
def __call__(self, *args, **kwargs):
warnings.warn(self.warning, self.warning_cls, stacklevel=2)
return str(self)
def __repr__(self):
super_repr = super(DeprecatedCallableStr, self).__repr__()
return '<DeprecatedCallableStr {}>'.format(super_repr)
| takeflight/wagtailnews | wagtailnews/deprecation.py | Python | bsd-2-clause | 609 |
# --------------------------------------------------------
# TensorFlow for Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# -------------------------------------------------------- | neopenx/Dragon | Dragon/python/dragon/vm/tensorflow/training/__init__.py | Python | bsd-2-clause | 193 |
import sympy.core.cache
# Why doesn't it work to use "import sympy.core.compatibility" and call "with_metaclass" as "sympy.core.compatibility.with_metaclass"?
from sympy.core.compatibility import with_metaclass
from sympy.core.function import AppliedUndef, FunctionClass
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
from sympy.core.cache import cacheit
'''
Tensor('name') creates a tensor that does not take arguments
>>> T = Tensor('T')
>>> T(*index)
TensorFunction creates a teonsr that do take arguments
>>> TF = TensorFunction('TF')
>>> TF(*index)(*args)
Both Tensor and TensorFuncton takes any number of index. Anytning can be put as an index but only indices of type sympy.Dummy or sympy.Symbol will be contracted over in varios functions.
>>> a=sympy.Dymmy('a'); b=sympy.Dymmy('b')
>>> isinstance(Tensor('T')(a,b), sympy.Symbol)
True
>>> a=sympy.Dymmy('a'); b=sympy.Dymmy('b')
>>> isinstance(TensorFunction('TF')(a,b), sympy.FunctionClass)
True
>>> a=sympy.Dymmy('a'); b=sympy.Dummy('b);
>>> x=sympy.Symbol('x')
>>> isinstance(TensorFunction('TF')(a,b)(x), sympy.Function)
True
'''
def isTensor(exp):
'''Test if exp is a Tensor or TensorFunction.
Only returns True for Tensor with index/indecis and TensorFunction with index/indecis and argument(s)
Only test the top type of exp, e.g. does not detect a derivative of a tensor as a tensor'''
return (isinstance(exp,(AppliedTensor,
AppliedAppliedTensorFunction)))
def tensorName(exp):
'''Writes exp a sring as a sring, without indices, without arguments.'''
if isinstance(exp,AppliedTensor):
return type(exp).__name__
if isinstance(type(exp),AppliedTensorFunction):
return type(type(exp)).__name__
if hasattr(exp,'args'):
return type(exp).__name__
return str(exp)
def longTensorName(exp):
'''Writes exp a sring as a sring, without indices, with arguments.'''
if isinstance(exp,AppliedTensor):
return type(exp).__name__
if isinstance(type(exp),AppliedTensorFunction):
return (type(type(exp)).__name__
+ "("
+ ", ".join([longTensorName(arg) for arg in exp.args])
+ ")" )
if getattr(exp, 'args', []):
return (type(exp).__name__
+ "("
+ ", ".join([longTensorName(arg) for arg in exp.args])
+ ")" )
return str(exp)
# Defines what types of indices that can be contracted over
def isAllowedDummyIndex(ind):
'''Returns True if object is allowed to use as a dummy index to be summed over acording to Einsteins sumation convention'''
return isinstance(ind, (sympy.Symbol, sympy.Dummy) )
def withNewIndex(tensor,index):
if isTensor(tensor):
return tensor.withNewIndex(*index)
return tensor
class TensorFunction(BasicMeta):
@cacheit
def __new__(mcl, name, *arg, **kw):
if (name == "AppliedTensorFunction"):
return type.__new__(mcl, name, *arg, **kw)
return type.__new__(mcl, name, (AppliedTensorFunction,), kw)
def __init__(self, *arg, **kw):
pass
#FIXME use sympy.core.compatibility.with_metaclass or similar
class AppliedTensorFunction(FunctionClass):
__metaclass__ = TensorFunction
@cacheit
def __new__(mcl, *index, **kw):
name = mcl.__name__ + str(index)
ret = type.__new__(mcl, name, (AppliedAppliedTensorFunction,AppliedUndef),kw)
ret.index = index
return ret
is_Tensor = True
class AppliedAppliedTensorFunction(AppliedUndef):
def withNewIndex(self, *index):
return type(type(self))(*index)(*self.args)
class Tensor(ManagedProperties):
@cacheit
def __new__(mcl, name, *arg, **kw):
if (name == "AppliedTensor"):
return type.__new__(mcl, name, *arg, **kw)
return type.__new__(mcl, name, (AppliedTensor,),kw)
#FIXME use sympy.core.compatibility.with_metaclass or
class AppliedTensor(sympy.Symbol):
__metaclass__ = Tensor
@cacheit
def __new__(cls, *index, **kw):
name = cls.__name__ + str(index)
ret = sympy.Symbol.__new__(cls, name, **kw)
ret.index = index
return ret
is_Tensor = True
def withNewIndex(self,*index):
return type(self)(*index)
##################### Here be unittest #####################
import unittest
class TestTensor(unittest.TestCase):
def setUp(self):
self.t = Tensor('t')
self.T = Tensor('t')
self.tf = TensorFunction('tf')
self.TF = TensorFunction('tf')
self.a = sympy.Dummy('a')
self.b = sympy.Dummy('b')
self.x = sympy.Symbol('x')
self.f = sympy.Function('f')
def test_classRelations(self):
t=self.t; tf=self.tf; a=self.a; b=self.b; x=self.x
self.assertEqual( type(t), Tensor )
self.assertEqual( type(t(a,b)), t )
self.assertEqual( type(t(a,b)).__base__, AppliedTensor )
self.assertTrue( isinstance(t(a,b), sympy.Symbol))
self.assertEqual( type(tf(a,b)), tf )
self.assertEqual( type(tf(a,b)).__base__, AppliedTensorFunction )
self.assertTrue( isinstance(tf(a,b), sympy.FunctionClass) )
self.assertEqual( type(tf(a,b)(x)), tf(a,b) )
self.assertEqual( type(tf(a,b)(x)).__base__, AppliedAppliedTensorFunction )
self.assertTrue( isinstance(tf(a,b)(x), AppliedUndef) )
self.assertTrue( isinstance(tf(a,b)(x), sympy.Function) )
def test_withNewIndex(self):
t=self.t; tf=self.tf; a=self.a; b=self.b; x=self.x
self.assertEqual( t(a).withNewIndex(b,b), t(b,b) )
self.assertEqual( tf(a)(x).withNewIndex(b,b), tf(b,b)(x) )
def test_isTensor(self):
t=self.t; tf=self.tf; a=self.a; b=self.b; x=self.x; f=self.f
self.assertFalse( isTensor(a) )
self.assertFalse( isTensor(f(x)) )
self.assertTrue( isTensor(t(a,b)) )
self.assertTrue( isTensor(tf(a,b)(x,x)) )
def test_eqality(self):
t=self.t; tf=self.tf; a=self.a; b=self.b; x=self.x; TF=self.TF; T=self.T
self.assertEqual( t(a), T(a) )
self.assertNotEqual( t(b), t(a) )
self.assertEqual( tf(a)(x,b), TF(a)(x,b) )
self.assertNotEqual( tf(a)(x,b), tf(a)(x,x) )
self.assertNotEqual( tf(a)(x,b), tf(b)(x,b) )
if __name__ == '__main__':
unittest.main()
| LindaLinsefors/EinSumConv | EinSumConv/tensor.py | Python | bsd-2-clause | 6,432 |
#
# Sending emails in combination
# with Motion surveillance software
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
import smtplib
from datetime import datetime
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def prompt(prompt):
return raw_input(prompt).strip()
fromaddr = '[email protected]' # prompt("From: ")
toaddrs = '[email protected]' # prompt("To: ")
subject = 'Security Alert.' # prompt("Subject: ")
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddrs
msg['Subject'] = subject
# Add the From: and To: headers at the start!
# msg = ("From: %s\r\nTo: %s\r\n\r\nSubject: %s\r\n"
# % (fromaddr, ", ".join(toaddrs), subject))
# print "Enter message, end with ^D (Unix) or ^Z (Windows):"
# body = ''
#while 1:
# try:
# line = raw_input()
# except EOFError:
# break
# if not line:
# break
# body = body + line
body = 'A motion has been detected.\nTime: %s' % str(datetime.now())
msg.attach(MIMEText(body, 'plain'))
print "Message length is " + repr(len(msg))
smtp = smtplib.SMTP()
# smtp.starttls()
smtp.set_debuglevel(1)
smtp.connect('smtp.hilpisch.com', 587)
smtp.login('hilpisch13', 'henrynikolaus06')
text = msg.as_string()
smtp.sendmail(fromaddr, toaddrs, text)
smtp.quit()
print text
| yhilpisch/rpi | doc/_store/mail.py | Python | bsd-2-clause | 1,304 |
from actstream.models import Action
from django.test import TestCase
from cyidentity.cyfullcontact.tests.util import create_sample_contact_info
class FullContactActivityStreamTestCase(TestCase):
def test_contact_create(self):
contact_info = create_sample_contact_info()
action = Action.objects.actor(contact_info).latest('timestamp')
self.assertEqual(action.verb, 'FullContact information was created')
| shawnhermans/cyborgcrm | cyidentity/cyfullcontact/tests/test_activity_stream.py | Python | bsd-2-clause | 433 |
#!/usr/bin/env python3
# Copyright (C) 2016 Job Snijders <[email protected]>
#
# This file is part of rtrsub
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rtrsub
version = rtrsub.__version__
import codecs
import os
import sys
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
here = abspath(dirname(__file__))
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
with codecs.open(join(here, 'README.md'), encoding='utf-8') as f:
README = f.read()
if sys.argv[-1] == 'publish':
os.system('python3 setup.py sdist upload')
print("You probably want to also tag the version now:")
print((" git tag -a %s -m 'version %s'" % (version, version)))
print(" git push --tags")
sys.exit()
install_reqs = parse_requirements('requirements.txt')
reqs = install_reqs
setup(
name='rtrsub',
version=version,
maintainer="Job Snijders",
maintainer_email='[email protected]',
url='https://github.com/job/rtrsub',
description='RTR Substitution',
long_description=README,
long_description_content_type="text/markdown",
license='BSD 2-Clause',
keywords='rpki prefix routing networking',
setup_requires=reqs,
install_requires=reqs,
classifiers=[
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only'
],
packages=find_packages(exclude=['tests', 'tests.*']),
entry_points={'console_scripts': ['rtrsub = rtrsub.rtrsub:main']},
)
| job/rtrsub | setup.py | Python | bsd-2-clause | 3,054 |
# -*- coding: utf-8 -*-
"""
Barycenters
===========
This example shows three methods to compute barycenters of time series.
For an overview over the available methods see the :mod:`tslearn.barycenters`
module.
*tslearn* provides three methods for calculating barycenters for a given set of
time series:
* *Euclidean barycenter* is simply the arithmetic mean for
each individual point in time, minimizing the summed euclidean distance
for each of them. As can be seen below, it is very different from the
DTW-based methods and may often be inappropriate. However, it is the
fastest of the methods shown.
* *DTW Barycenter Averaging (DBA)* is an iteratively refined barycenter,
starting out with a (potentially) bad candidate and improving it
until convergence criteria are met. The optimization can be accomplished
with (a) expectation-maximization [1] and (b) stochastic subgradient
descent [2]. Empirically, the latter "is [often] more stable and finds better
solutions in shorter time" [2].
* *Soft-DTW barycenter* uses a differentiable loss function to iteratively
find a barycenter [3]. The method itself and the parameter
:math:`\\gamma=1.0` is described in more detail in the section on
:ref:`DTW<dtw>`. There is also a dedicated
:ref:`example<sphx_glr_auto_examples_clustering_plot_barycenter_interpolate.py>`
available.
[1] F. Petitjean, A. Ketterlin & P. Gancarski. A global averaging method for
dynamic time warping, with applications to clustering. Pattern Recognition,
Elsevier, 2011, Vol. 44, Num. 3, pp. 678-693.
[2] D. Schultz & B. Jain. Nonsmooth Analysis and Subgradient Methods for
Averaging in Dynamic Time Warping Spaces. Pattern Recognition, 74, 340-358.
[3] M. Cuturi & M. Blondel. Soft-DTW: a Differentiable Loss Function for
Time-Series. ICML 2017.
"""
# Author: Romain Tavenard, Felix Divo
# License: BSD 3 clause
import numpy
import matplotlib.pyplot as plt
from tslearn.barycenters import \
euclidean_barycenter, \
dtw_barycenter_averaging, \
dtw_barycenter_averaging_subgradient, \
softdtw_barycenter
from tslearn.datasets import CachedDatasets
# fetch the example data set
numpy.random.seed(0)
X_train, y_train, _, _ = CachedDatasets().load_dataset("Trace")
X = X_train[y_train == 2]
length_of_sequence = X.shape[1]
def plot_helper(barycenter):
# plot all points of the data set
for series in X:
plt.plot(series.ravel(), "k-", alpha=.2)
# plot the given barycenter of them
plt.plot(barycenter.ravel(), "r-", linewidth=2)
# plot the four variants with the same number of iterations and a tolerance of
# 1e-3 where applicable
ax1 = plt.subplot(4, 1, 1)
plt.title("Euclidean barycenter")
plot_helper(euclidean_barycenter(X))
plt.subplot(4, 1, 2, sharex=ax1)
plt.title("DBA (vectorized version of Petitjean's EM)")
plot_helper(dtw_barycenter_averaging(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 3, sharex=ax1)
plt.title("DBA (subgradient descent approach)")
plot_helper(dtw_barycenter_averaging_subgradient(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 4, sharex=ax1)
plt.title("Soft-DTW barycenter ($\gamma$=1.0)")
plot_helper(softdtw_barycenter(X, gamma=1., max_iter=50, tol=1e-3))
# clip the axes for better readability
ax1.set_xlim([0, length_of_sequence])
# show the plot(s)
plt.tight_layout()
plt.show()
| rtavenar/tslearn | tslearn/docs/examples/clustering/plot_barycenters.py | Python | bsd-2-clause | 3,323 |
import pump
import pump_bfd
class BFDSinkEx(pump_bfd.BFDSink):
def __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(pump_bfd.BFDSink, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.mode = getattr(opts, "mode", "diff")
self.init_worker(pump_bfd.BFDSink.run)
@staticmethod
def check_spec(source_bucket, source_node, opts, spec, cur):
pump.Sink.check_spec(source_bucket, source_node, opts, spec, cur)
seqno, dep, faillover_log, snapshot_markers = pump_bfd.BFD.find_seqno(opts, spec,
source_bucket['name'],
source_node['hostname'],
getattr(opts, "mode", "diff"))
if 'seqno' in cur:
cur['seqno'][(source_bucket['name'], source_node['hostname'])] = seqno
else:
cur['seqno'] = {(source_bucket['name'], source_node['hostname']): seqno}
if 'failoverlog' in cur:
cur['failoverlog'][(source_bucket['name'], source_node['hostname'])] = faillover_log
else:
cur['failoverlog'] = {(source_bucket['name'], source_node['hostname']): faillover_log}
if 'snapshot' in cur:
cur['snapshot'][(source_bucket['name'], source_node['hostname'])] = snapshot_markers
else:
cur['snapshot'] = {(source_bucket['name'], source_node['hostname']): snapshot_markers} | TOTVS/mdmpublic | couchbase-cli/lib/python/pump_bfd2.py | Python | bsd-2-clause | 1,621 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_google_dork.models
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
replaces = [('django_google_dork', '0001_initial'), ('django_google_dork', '0002_auto_20141116_1551'), ('django_google_dork', '0003_run_engine')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('name', django_google_dork.models.CampaignNameField(unique=True, max_length=32)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Dork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('query', django_google_dork.models.DorkQueryField(max_length=256)),
('campaign', models.ForeignKey(to='django_google_dork.Campaign')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1024)),
('summary', models.TextField()),
('url', models.URLField(max_length=1024)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Run',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('dork', models.ForeignKey(to='django_google_dork.Dork')),
('result_set', models.ManyToManyField(to='django_google_dork.Result')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='result',
unique_together=set([('title', 'summary', 'url')]),
),
migrations.AlterUniqueTogether(
name='dork',
unique_together=set([('campaign', 'query')]),
),
migrations.CreateModel(
name='SearchEngine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname', models.CharField(unique=True, max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='campaign',
name='enabled',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AddField(
model_name='dork',
name='enabled',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AddField(
model_name='run',
name='engine',
field=models.ForeignKey(default=None, to='django_google_dork.SearchEngine'),
preserve_default=False,
),
]
| chgans/django-google-dork | django_google_dork/migrations/0001_initial.py | Python | bsd-2-clause | 4,143 |
# Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Mechanical Turk"
prefix = "mechanicalturk"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
AcceptQualificationRequest = Action("AcceptQualificationRequest")
ApproveAssignment = Action("ApproveAssignment")
ApproveRejectedAssignment = Action("ApproveRejectedAssignment")
AssignQualification = Action("AssignQualification")
AssociateQualificationWithWorker = Action("AssociateQualificationWithWorker")
BlockWorker = Action("BlockWorker")
ChangeHITTypeOfHIT = Action("ChangeHITTypeOfHIT")
CreateAdditionalAssignmentsForHIT = Action("CreateAdditionalAssignmentsForHIT")
CreateHIT = Action("CreateHIT")
CreateHITType = Action("CreateHITType")
CreateHITWithHITType = Action("CreateHITWithHITType")
CreateQualificationType = Action("CreateQualificationType")
CreateWorkerBlock = Action("CreateWorkerBlock")
DeleteHIT = Action("DeleteHIT")
DeleteQualificationType = Action("DeleteQualificationType")
DeleteWorkerBlock = Action("DeleteWorkerBlock")
DisableHIT = Action("DisableHIT")
DisassociateQualificationFromWorker = Action("DisassociateQualificationFromWorker")
DisposeHIT = Action("DisposeHIT")
DisposeQualificationType = Action("DisposeQualificationType")
ExtendHIT = Action("ExtendHIT")
ForceExpireHIT = Action("ForceExpireHIT")
GetAccountBalance = Action("GetAccountBalance")
GetAssignment = Action("GetAssignment")
GetAssignmentsForHIT = Action("GetAssignmentsForHIT")
GetBlockedWorkers = Action("GetBlockedWorkers")
GetBonusPayments = Action("GetBonusPayments")
GetFileUploadURL = Action("GetFileUploadURL")
GetHIT = Action("GetHIT")
GetHITsForQualificationType = Action("GetHITsForQualificationType")
GetQualificationRequests = Action("GetQualificationRequests")
GetQualificationScore = Action("GetQualificationScore")
GetQualificationType = Action("GetQualificationType")
GetQualificationsForQualificationType = Action("GetQualificationsForQualificationType")
GetRequesterStatistic = Action("GetRequesterStatistic")
GetRequesterWorkerStatistic = Action("GetRequesterWorkerStatistic")
GetReviewResultsForHIT = Action("GetReviewResultsForHIT")
GetReviewableHITs = Action("GetReviewableHITs")
GrantBonus = Action("GrantBonus")
GrantQualification = Action("GrantQualification")
ListAssignmentsForHIT = Action("ListAssignmentsForHIT")
ListBonusPayments = Action("ListBonusPayments")
ListHITs = Action("ListHITs")
ListHITsForQualificationType = Action("ListHITsForQualificationType")
ListQualificationRequests = Action("ListQualificationRequests")
ListQualificationTypes = Action("ListQualificationTypes")
ListReviewPolicyResultsForHIT = Action("ListReviewPolicyResultsForHIT")
ListReviewableHITs = Action("ListReviewableHITs")
ListWorkerBlocks = Action("ListWorkerBlocks")
ListWorkersWithQualificationType = Action("ListWorkersWithQualificationType")
NotifyWorkers = Action("NotifyWorkers")
RegisterHITType = Action("RegisterHITType")
RejectAssignment = Action("RejectAssignment")
RejectQualificationRequest = Action("RejectQualificationRequest")
RevokeQualification = Action("RevokeQualification")
SearchHITs = Action("SearchHITs")
SearchQualificationTypes = Action("SearchQualificationTypes")
SendBonus = Action("SendBonus")
SendTestEventNotification = Action("SendTestEventNotification")
SetHITAsReviewing = Action("SetHITAsReviewing")
SetHITTypeNotification = Action("SetHITTypeNotification")
UnblockWorker = Action("UnblockWorker")
UpdateExpirationForHIT = Action("UpdateExpirationForHIT")
UpdateHITReviewStatus = Action("UpdateHITReviewStatus")
UpdateHITTypeOfHIT = Action("UpdateHITTypeOfHIT")
UpdateNotificationSettings = Action("UpdateNotificationSettings")
UpdateQualificationScore = Action("UpdateQualificationScore")
UpdateQualificationType = Action("UpdateQualificationType")
| cloudtools/awacs | awacs/mechanicalturk.py | Python | bsd-2-clause | 4,190 |
# Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from rest_framework import generics
from ..mixins import CampaignMixin
from .serializers import CampaignSerializer
LOGGER = logging.getLogger(__name__)
class CampaignAPIView(CampaignMixin, generics.RetrieveDestroyAPIView):
"""
Retrieves a campaign
Retrieves the details of a ``Campaign``.
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/cowork/campaign/best-practices/ HTTP/1.1
responds
.. code-block:: json
{
"slug": "best-practices",
"account": "envconnect",
"title": "Assessment on Best Practices",
"active": true,
"quizz_mode": false,
"questions": [
{
"path": "/product-design",
"title": "Product Design",
"unit": "assessment-choices",
},
{
"path": "/packaging-design",
"title": "Packaging Design",
"unit": "assessment-choices",
}
]
}
"""
serializer_class = CampaignSerializer
def get_object(self):
return self.campaign
def delete(self, request, *args, **kwargs):
"""
Deletes a campaign
Removes a ``Campaign`` and all associated ``Sample``
from the database.
**Tags**: survey
**Examples**
.. code-block:: http
DELETE /api/cowork/campaign/best-practices/ HTTP/1.1
"""
#pylint:disable=useless-super-delegation
return super(CampaignAPIView, self).delete(request, *args, **kwargs)
| djaodjin/djaodjin-survey | survey/api/campaigns.py | Python | bsd-2-clause | 3,013 |
import unittest
from nose2 import events, loader, session
from nose2.plugins.loader import functions
from nose2.tests._common import TestCase
class TestFunctionLoader(TestCase):
def setUp(self):
self.session = session.Session()
self.loader = loader.PluggableTestLoader(self.session)
self.plugin = functions.Functions(session=self.session)
def test_can_load_test_functions_from_module(self):
class Mod(object):
pass
def test():
pass
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 1)
assert isinstance(event.extraTests[0], unittest.FunctionTestCase)
def test_ignores_generator_functions(self):
class Mod(object):
pass
def test():
yield
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 0)
def test_ignores_functions_that_take_args(self):
class Mod(object):
pass
def test(a):
pass
m = Mod()
m.test = test
event = events.LoadFromModuleEvent(self.loader, m)
self.session.hooks.loadTestsFromModule(event)
self.assertEqual(len(event.extraTests), 0)
| ptthiem/nose2 | nose2/tests/unit/test_functions_loader.py | Python | bsd-2-clause | 1,464 |
Subsets and Splits